Diffstat (limited to 'drivers/scsi/smartpqi/smartpqi_init.c')
| -rw-r--r-- | drivers/scsi/smartpqi/smartpqi_init.c | 243 |
1 file changed, 202 insertions(+), 41 deletions(-)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 870f37b70546..fe549e2b7c94 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,7 +19,8 @@
 #include <linux/bcd.h>
 #include <linux/reboot.h>
 #include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
+#include <linux/crash_dump.h>
+#include <linux/string.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -33,11 +34,11 @@
 #define BUILD_TIMESTAMP
 #endif

-#define DRIVER_VERSION		"2.1.30-031"
+#define DRIVER_VERSION		"2.1.36-026"
 #define DRIVER_MAJOR		2
 #define DRIVER_MINOR		1
-#define DRIVER_RELEASE		30
-#define DRIVER_REVISION		31
+#define DRIVER_RELEASE		36
+#define DRIVER_REVISION		26

 #define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
 			DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -68,6 +69,7 @@ static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
 static void pqi_verify_structures(void);
 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
 	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ctrl_offline_worker(struct work_struct *work);
 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
 static void pqi_scan_start(struct Scsi_Host *shost);
@@ -2011,18 +2013,31 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"-:-");

-	if (pqi_is_logical_device(device))
+	if (pqi_is_logical_device(device)) {
 		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			" %08x%08x",
 			*((u32 *)&device->scsi3addr),
 			*((u32 *)&device->scsi3addr[4]));
-	else
+	} else if (ctrl_info->rpl_extended_format_4_5_supported) {
+		if (device->device_type == SA_DEVICE_TYPE_NVME)
+			count += scnprintf(buffer + count,
+				PQI_DEV_INFO_BUFFER_LENGTH - count,
+				" %016llx%016llx",
+				get_unaligned_be64(&device->wwid[0]),
+				get_unaligned_be64(&device->wwid[8]));
+		else
+			count += scnprintf(buffer + count,
+				PQI_DEV_INFO_BUFFER_LENGTH - count,
+				" %016llx",
+				get_unaligned_be64(&device->wwid[0]));
+	} else {
 		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
-			" %016llx%016llx",
-			get_unaligned_be64(&device->wwid[0]),
-			get_unaligned_be64(&device->wwid[8]));
+			" %016llx",
+			get_unaligned_be64(&device->wwid[0]));
+	}
+

 	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
 		" %s %.8s %.16s ",
@@ -3811,7 +3826,8 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
 {
 	int num_interrupts;
 	u32 heartbeat_count;
-	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
+	struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t,
+		heartbeat_timer);

 	pqi_check_ctrl_health(ctrl_info);
 	if (pqi_ctrl_offline(ctrl_info))
@@ -3854,7 +3870,7 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)

 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
 {
-	del_timer_sync(&ctrl_info->heartbeat_timer);
+	timer_delete_sync(&ctrl_info->heartbeat_timer);
 }

 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
@@ -5247,7 +5263,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->error_buffer_length =
 		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

-	if (reset_devices)
+	if (is_kdump_kernel())
 		max_transfer_size = min(ctrl_info->max_transfer_size,
 			PQI_MAX_TRANSFER_SIZE_KDUMP);
 	else
@@ -5276,18 +5292,17 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
 	u16 num_elements_per_iq;
 	u16 num_elements_per_oq;

-	if (reset_devices) {
+	if (is_kdump_kernel()) {
 		num_queue_groups = 1;
 	} else {
-		int num_cpus;
 		int max_queue_groups;

 		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
 			ctrl_info->max_outbound_queues - 1);
 		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

-		num_cpus = num_online_cpus();
-		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+		num_queue_groups =
+			blk_mq_num_online_queues(ctrl_info->max_msix_vectors);
 		num_queue_groups = min(num_queue_groups, max_queue_groups);
 	}

@@ -5540,14 +5555,25 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
 	pqi_scsi_done(scmd);
 }

+/*
+ * Adjust the timeout value for physical devices sent to the firmware
+ * by subtracting 3 seconds for timeouts greater than or equal to 8 seconds.
+ *
+ * This provides the firmware with additional time to attempt early recovery
+ * before the OS-level timeout occurs.
+ */
+#define ADJUST_SECS_TIMEOUT_VALUE(tv)	(((tv) >= 8) ? ((tv) - 3) : (tv))
+
 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
 	struct pqi_queue_group *queue_group, bool io_high_prio)
 {
 	int rc;
+	u32 timeout;
 	size_t cdb_length;
 	struct pqi_io_request *io_request;
 	struct pqi_raid_path_request *request;
+	struct request *rq;

 	io_request = pqi_alloc_io_request(ctrl_info, scmd);
 	if (!io_request)
@@ -5619,6 +5645,12 @@ static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}

+	if (device->is_physical_device) {
+		rq = scsi_cmd_to_rq(scmd);
+		timeout = rq->timeout / HZ;
+		put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
+	}
+
 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

 	return 0;
@@ -5990,7 +6022,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
 			pqi_stream_data->next_lba = rmd.first_block +
 				rmd.block_cnt;
 			pqi_stream_data->last_accessed = jiffies;
-			per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
+			per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
 			return true;
 		}

@@ -6069,7 +6101,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
-				per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
+				per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
 			}
 		}
 		if (!raid_bypassed)
@@ -6395,10 +6427,22 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev
 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device,
 	u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
 {
+	unsigned long flags;
 	int rc;

 	mutex_lock(&ctrl_info->lun_reset_mutex);

+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+	if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
+		dev_warn(&ctrl_info->pci_dev->dev,
+			"skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
+			ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		mutex_unlock(&ctrl_info->lun_reset_mutex);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
 	dev_err(&ctrl_info->pci_dev->dev,
 		"resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
 		ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
@@ -6490,7 +6534,7 @@ out:
 	return SUCCESS;
 }

-static int pqi_slave_alloc(struct scsi_device *sdev)
+static int pqi_sdev_init(struct scsi_device *sdev)
 {
 	struct pqi_scsi_dev *device;
 	unsigned long flags;
@@ -6547,10 +6591,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

 	if (!ctrl_info->disable_managed_interrupts)
-		return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-			      ctrl_info->pci_dev, 0);
+		blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+				     &ctrl_info->pci_dev->dev, 0);
 	else
-		return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
 }

 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -6558,7 +6602,8 @@ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
 	return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
 }

-static int pqi_slave_configure(struct scsi_device *sdev)
+static int pqi_sdev_configure(struct scsi_device *sdev,
+		struct queue_limits *lim)
 {
 	int rc = 0;
 	struct pqi_scsi_dev *device;
@@ -6574,11 +6619,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
 	return rc;
 }

-static void pqi_slave_destroy(struct scsi_device *sdev)
+static void pqi_sdev_destroy(struct scsi_device *sdev)
 {
 	struct pqi_ctrl_info *ctrl_info;
 	struct pqi_scsi_dev *device;
+	struct pqi_tmf_work *tmf_work;
 	int mutex_acquired;
+	unsigned int lun;
 	unsigned long flags;

 	ctrl_info = shost_to_hba(sdev->host);
@@ -6605,8 +6652,13 @@ static void pqi_slave_destroy(struct scsi_device *sdev)

 	mutex_unlock(&ctrl_info->scan_mutex);

+	for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+		cancel_work_sync(&tmf_work->work_struct);
+
+	mutex_lock(&ctrl_info->lun_reset_mutex);
 	pqi_dev_info(ctrl_info, "removed", device);
 	pqi_free_device(device);
+	mutex_unlock(&ctrl_info->lun_reset_mutex);
 }

 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
@@ -6759,17 +6811,15 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 	}

 	if (iocommand.buf_size > 0) {
-		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
-		if (!kernel_buffer)
-			return -ENOMEM;
 		if (iocommand.Request.Type.Direction & XFER_WRITE) {
-			if (copy_from_user(kernel_buffer, iocommand.buf,
-				iocommand.buf_size)) {
-				rc = -EFAULT;
-				goto out;
-			}
+			kernel_buffer = memdup_user(iocommand.buf,
+				iocommand.buf_size);
+			if (IS_ERR(kernel_buffer))
+				return PTR_ERR(kernel_buffer);
 		} else {
-			memset(kernel_buffer, 0, iocommand.buf_size);
+			kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL);
+			if (!kernel_buffer)
+				return -ENOMEM;
 		}
 	}

@@ -7549,9 +7599,9 @@ static const struct scsi_host_template pqi_driver_template = {
 	.eh_device_reset_handler = pqi_eh_device_reset_handler,
 	.eh_abort_handler = pqi_eh_abort_handler,
 	.ioctl = pqi_ioctl,
-	.slave_alloc = pqi_slave_alloc,
-	.slave_configure = pqi_slave_configure,
-	.slave_destroy = pqi_slave_destroy,
+	.sdev_init = pqi_sdev_init,
+	.sdev_configure = pqi_sdev_configure,
+	.sdev_destroy = pqi_sdev_destroy,
 	.map_queues = pqi_map_queues,
 	.sdev_groups = pqi_sdev_groups,
 	.shost_groups = pqi_shost_groups,
@@ -8288,12 +8338,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	u32 product_id;

 	if (reset_devices) {
-		if (pqi_is_fw_triage_supported(ctrl_info)) {
+		if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
 			rc = sis_wait_for_fw_triage_completion(ctrl_info);
 			if (rc)
 				return rc;
 		}
-		if (sis_is_ctrl_logging_supported(ctrl_info)) {
+		if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
 			sis_notify_kdump(ctrl_info);
 			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
 			if (rc)
@@ -8344,7 +8394,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->product_id = (u8)product_id;
 	ctrl_info->product_revision = (u8)(product_id >> 8);

-	if (reset_devices) {
+	if (is_kdump_kernel()) {
 		if (ctrl_info->max_outstanding_requests >
 			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
 				ctrl_info->max_outstanding_requests =
@@ -8480,7 +8530,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	if (rc)
 		return rc;

-	if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+	if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
 		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
 		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
 	}
@@ -8922,7 +8972,8 @@ static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
 	if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
 		goto out;

-	host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
+	host_memory_descriptor->host_chunk_virt_address =
+		kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
 	if (!host_memory_descriptor->host_chunk_virt_address)
 		goto out;

@@ -9128,6 +9179,7 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
 	pqi_ctrl_wait_until_quiesced(ctrl_info);
 	pqi_fail_all_outstanding_requests(ctrl_info);
 	pqi_ctrl_unblock_requests(ctrl_info);
+	pqi_take_ctrl_devices_offline(ctrl_info);
 }

 static void pqi_ctrl_offline_worker(struct work_struct *work)
@@ -9202,6 +9254,27 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
 	schedule_work(&ctrl_info->ctrl_offline_work);
 }

+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	unsigned long flags;
+	struct pqi_scsi_dev *device;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
+		if (rc)
+			continue;
+
+		/*
+		 * Is the sdev pointer NULL?
+		 */
+		if (device->sdev)
+			scsi_device_set_state(device->sdev, SDEV_OFFLINE);
+	}
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
 	const struct pci_device_id *id)
 {
@@ -9710,6 +9783,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x00a3)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1ff9, 0x00a1)
 	},
 	{
@@ -10046,6 +10123,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4044)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4054)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4084)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4094)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4140)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4240)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x207d, 0x4840)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
 	},
 	{
@@ -10262,6 +10367,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1018, 0x8238)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f3f, 0x0610)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_LENOVO, 0x0220)
 	},
 	{
@@ -10270,10 +10383,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0222)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0223)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0224)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0225)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_LENOVO, 0x0520)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0521)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_LENOVO, 0x0522)
 	},
 	{
@@ -10294,6 +10427,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0624)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0625)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0626)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0627)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_LENOVO, 0x0628)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1014, 0x0718)
 	},
 	{
@@ -10322,6 +10475,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1ded, 0x3301)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1ff9, 0x0045)
 	},
 	{
@@ -10470,6 +10627,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1f51, 0x100b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1f51, 0x100e)
 	},
 	{
