Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--  drivers/scsi/hpsa.c | 1666
1 file changed, 1051 insertions, 615 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4f7cdb28bd38..3654b12c5d5a 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1,5 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers + * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries * Copyright 2016 Microsemi Corporation * Copyright 2014-2015 PMC-Sierra, Inc. * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. @@ -21,7 +22,6 @@ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> @@ -51,7 +51,7 @@ #include <linux/jiffies.h> #include <linux/percpu-defs.h> #include <linux/percpu.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <asm/div64.h> #include "hpsa_cmd.h" #include "hpsa.h" @@ -60,7 +60,7 @@ * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' * with an optional trailing '-' followed by a byte value (0-255). */ -#define HPSA_DRIVER_VERSION "3.4.20-0" +#define HPSA_DRIVER_VERSION "3.4.20-200" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -73,19 +73,17 @@ /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 +/* How long to wait before giving up on a command */ +#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ HPSA_DRIVER_VERSION); -MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION(HPSA_DRIVER_VERSION); MODULE_LICENSE("GPL"); +MODULE_ALIAS("cciss"); -static int hpsa_allow_any; -module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(hpsa_allow_any, - "Allow hpsa driver to access unknown HP Smart Array hardware"); static int hpsa_simple_mode; module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_simple_mode, @@ -148,6 +146,8 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; @@ -158,6 +158,26 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); * access = Address of the struct of function pointers */ static struct board_type products[] = { + {0x40700E11, "Smart Array 5300", &SA5A_access}, + {0x40800E11, "Smart Array 5i", &SA5B_access}, + {0x40820E11, "Smart Array 532", &SA5B_access}, + {0x40830E11, "Smart Array 5312", &SA5B_access}, + {0x409A0E11, "Smart Array 641", &SA5A_access}, + {0x409B0E11, "Smart Array 642", &SA5A_access}, + {0x409C0E11, "Smart Array 6400", &SA5A_access}, + {0x409D0E11, "Smart Array 6400 EM", &SA5A_access}, + {0x40910E11, "Smart Array 6i", &SA5A_access}, + {0x3225103C, "Smart Array P600", &SA5A_access}, + {0x3223103C, "Smart Array P800", &SA5A_access}, + {0x3234103C, "Smart Array P400", &SA5A_access}, + {0x3235103C, "Smart Array P400i", &SA5A_access}, + {0x3211103C, "Smart Array E200i", &SA5A_access}, + {0x3212103C, "Smart Array E200", &SA5A_access}, + {0x3213103C, "Smart Array E200i", &SA5A_access}, + {0x3214103C, "Smart Array E200i", &SA5A_access}, + {0x3215103C, "Smart Array E200i", &SA5A_access}, + {0x3237103C, "Smart Array E500", &SA5A_access}, + {0x323D103C, "Smart Array P700m", &SA5A_access}, {0x3241103C, 
"Smart Array P212", &SA5_access}, {0x3243103C, "Smart Array P410", &SA5_access}, {0x3245103C, "Smart Array P410i", &SA5_access}, @@ -232,10 +252,15 @@ static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg); +static int hpsa_passthru_ioctl(struct ctlr_info *h, + IOCTL_Command_struct *iocommand); +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, + BIG_IOCTL_Command_struct *ioc); #ifdef CONFIG_COMPAT -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); #endif @@ -258,9 +283,10 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); -static int hpsa_slave_alloc(struct scsi_device *sdev); -static int hpsa_slave_configure(struct scsi_device *sdev); -static void hpsa_slave_destroy(struct scsi_device *sdev); +static int hpsa_sdev_init(struct scsi_device *sdev); +static int hpsa_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim); +static void hpsa_sdev_destroy(struct scsi_device *sdev); static void hpsa_update_scsi_devices(struct ctlr_info *h); static int check_for_unit_attention(struct ctlr_info *h, @@ -278,7 +304,8 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u64 *cfg_offset); static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); -static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); +static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, + bool *legacy_board); static int wait_for_device_to_become_ready(struct ctlr_info *h, unsigned char lunaddr[], int reply_queue); @@ -323,11 +350,6 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c) return c->scsi_cmd == SCSI_CMD_IDLE; } -static inline bool hpsa_is_pending_event(struct CommandList *c) -{ - return c->reset_pending; -} - /* extract sense key, asc, and ascq from sense data. -1 means invalid. */ static void decode_sense_data(const u8 *sense_data, int sense_data_len, u8 *sense_key, u8 *asc, u8 *ascq) @@ -431,17 +453,13 @@ static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int status, len; + int status; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); - char tmpbuf[10]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; - strncpy(tmpbuf, buf, len); - tmpbuf[len] = '\0'; - if (sscanf(tmpbuf, "%d", &status) != 1) + if (kstrtoint(buf, 10, &status)) return -EINVAL; h = shost_to_hba(shost); h->acciopath_status = !!status; @@ -455,17 +473,13 @@ static ssize_t host_store_raid_offload_debug(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int debug_level, len; + int debug_level; struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); - char tmpbuf[10]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - len = count > sizeof(tmpbuf) - 1 ? 
sizeof(tmpbuf) - 1 : count; - strncpy(tmpbuf, buf, len); - tmpbuf[len] = '\0'; - if (sscanf(tmpbuf, "%d", &debug_level) != 1) + if (kstrtoint(buf, 10, &debug_level)) return -EINVAL; if (debug_level < 0) debug_level = 0; @@ -487,6 +501,12 @@ static ssize_t host_store_rescan(struct device *dev, return count; } +static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) +{ + device->offload_enabled = 0; + device->offload_to_be_enabled = 0; +} + static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf) { @@ -767,7 +787,12 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, } offload_enabled = hdev->offload_enabled; spin_unlock_irqrestore(&h->lock, flags); - return snprintf(buf, 20, "%d\n", offload_enabled); + + if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) + return snprintf(buf, 20, "%d\n", offload_enabled); + else + return snprintf(buf, 40, "%s\n", + "Not applicable for a controller"); } #define MAX_PATHS 8 @@ -866,14 +891,24 @@ static ssize_t host_show_ctlr_num(struct device *dev, return snprintf(buf, 20, "%d\n", h->ctlr); } -static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); -static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); -static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); +static ssize_t host_show_legacy_board(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0); +} + +static DEVICE_ATTR_RO(raid_level); +static DEVICE_ATTR_RO(lunid); +static DEVICE_ATTR_RO(unique_id); static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); -static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL); +static DEVICE_ATTR_RO(sas_address); static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, host_show_hp_ssd_smart_path_enabled, NULL); -static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL); +static DEVICE_ATTR_RO(path_info); static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, host_show_hp_ssd_smart_path_status, host_store_hp_ssd_smart_path_status); @@ -891,34 +926,41 @@ static DEVICE_ATTR(lockup_detected, S_IRUGO, host_show_lockup_detected, NULL); static DEVICE_ATTR(ctlr_num, S_IRUGO, host_show_ctlr_num, NULL); - -static struct device_attribute *hpsa_sdev_attrs[] = { - &dev_attr_raid_level, - &dev_attr_lunid, - &dev_attr_unique_id, - &dev_attr_hp_ssd_smart_path_enabled, - &dev_attr_path_info, - &dev_attr_sas_address, +static DEVICE_ATTR(legacy_board, S_IRUGO, + host_show_legacy_board, NULL); + +static struct attribute *hpsa_sdev_attrs[] = { + &dev_attr_raid_level.attr, + &dev_attr_lunid.attr, + &dev_attr_unique_id.attr, + &dev_attr_hp_ssd_smart_path_enabled.attr, + &dev_attr_path_info.attr, + &dev_attr_sas_address.attr, NULL, }; -static struct device_attribute *hpsa_shost_attrs[] = { - &dev_attr_rescan, - &dev_attr_firmware_revision, - &dev_attr_commands_outstanding, - &dev_attr_transport_mode, - &dev_attr_resettable, - &dev_attr_hp_ssd_smart_path_status, - &dev_attr_raid_offload_debug, - &dev_attr_lockup_detected, - &dev_attr_ctlr_num, +ATTRIBUTE_GROUPS(hpsa_sdev); + +static struct attribute *hpsa_shost_attrs[] = { + &dev_attr_rescan.attr, + &dev_attr_firmware_revision.attr, + &dev_attr_commands_outstanding.attr, + &dev_attr_transport_mode.attr, + &dev_attr_resettable.attr, + &dev_attr_hp_ssd_smart_path_status.attr, + &dev_attr_raid_offload_debug.attr, + 
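The two sysfs store handlers converted above drop the bounce buffer plus strncpy()/sscanf() parsing in favor of kstrtoint(), which bounds-checks the value and rejects trailing garbage in one call. A minimal sketch of the same pattern, using a made-up "foo" attribute rather than anything from this driver:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int val;

	/* Returns 0 on success, -EINVAL or -ERANGE on bad input */
	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	/* ... apply val to the device ... */
	return count;
}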
&dev_attr_lockup_detected.attr, + &dev_attr_ctlr_num.attr, + &dev_attr_legacy_board.attr, NULL, }; +ATTRIBUTE_GROUPS(hpsa_shost); + #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ HPSA_MAX_CONCURRENT_PASSTHRUS) -static struct scsi_host_template hpsa_driver_template = { +static const struct scsi_host_template hpsa_driver_template = { .module = THIS_MODULE, .name = HPSA, .proc_name = HPSA, @@ -927,18 +969,17 @@ static struct scsi_host_template hpsa_driver_template = { .scan_finished = hpsa_scan_finished, .change_queue_depth = hpsa_change_queue_depth, .this_id = -1, - .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, - .slave_alloc = hpsa_slave_alloc, - .slave_configure = hpsa_slave_configure, - .slave_destroy = hpsa_slave_destroy, + .sdev_init = hpsa_sdev_init, + .sdev_configure = hpsa_sdev_configure, + .sdev_destroy = hpsa_sdev_destroy, #ifdef CONFIG_COMPAT .compat_ioctl = hpsa_compat_ioctl, #endif - .sdev_attrs = hpsa_sdev_attrs, - .shost_attrs = hpsa_shost_attrs, - .max_sectors = 1024, + .sdev_groups = hpsa_sdev_groups, + .shost_groups = hpsa_shost_groups, + .max_sectors = 2048, .no_write_same = 1, }; @@ -1007,11 +1048,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); if (unlikely(!h->msix_vectors)) return; - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - c->Header.ReplyQueue = - raw_smp_processor_id() % h->nreply_queues; - else - c->Header.ReplyQueue = reply_queue % h->nreply_queues; + c->Header.ReplyQueue = reply_queue; } } @@ -1025,10 +1062,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h, * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->ReplyQueue = smp_processor_id() % h->nreply_queues; - else - cp->ReplyQueue = reply_queue % h->nreply_queues; + cp->ReplyQueue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit (bit 0) @@ -1049,10 +1083,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, /* Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->reply_queue = smp_processor_id() % h->nreply_queues; - else - cp->reply_queue = reply_queue % h->nreply_queues; + cp->reply_queue = reply_queue; /* Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) @@ -1071,10 +1102,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h, * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) - cp->reply_queue = smp_processor_id() % h->nreply_queues; - else - cp->reply_queue = reply_queue % h->nreply_queues; + cp->reply_queue = reply_queue; /* * Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 @@ -1119,6 +1147,13 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, { dial_down_lockup_detection_during_fw_flash(h, c); atomic_inc(&h->commands_outstanding); + /* + * Check to see if the command is being retried. 
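The sysfs rework finishing here swaps DEVICE_ATTR(..., S_IRUGO, show, NULL) declarations for DEVICE_ATTR_RO() and turns the old struct device_attribute * arrays into attribute groups consumed through .sdev_groups/.shost_groups. How the macros fit together, with placeholder names:

#include <linux/device.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(bar) expects a show routine literally named bar_show() */
static ssize_t bar_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(bar);

static struct attribute *example_attrs[] = {
	&dev_attr_bar.attr,
	NULL,
};
/* Generates example_groups[], the array a host template's *_groups field takes */
ATTRIBUTE_GROUPS(example);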
+ */ + if (c->device && !c->retry_pending) + atomic_inc(&c->device->commands_outstanding); + + reply_queue = h->reply_map[raw_smp_processor_id()]; switch (c->cmd_type) { case CMD_IOACCEL1: set_ioaccel1_performant_mode(h, c, reply_queue); @@ -1140,9 +1175,6 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h, static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) { - if (unlikely(hpsa_is_pending_event(c))) - return finish_cmd(c); - __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); } @@ -1237,7 +1269,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, dev->model, label, dev->offload_config ? '+' : '-', - dev->offload_enabled ? '+' : '-', + dev->offload_to_be_enabled ? '+' : '-', dev->expose_device); } @@ -1301,7 +1333,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, dev_warn(&h->pdev->dev, "physical device with no LUN=0," " suspect firmware bug or unsupported hardware " "configuration.\n"); - return -1; + return -1; } lun_assigned: @@ -1312,36 +1344,42 @@ lun_assigned: (*nadded)++; hpsa_show_dev_msg(KERN_INFO, h, device, device->expose_device ? "added" : "masked"); - device->offload_to_be_enabled = device->offload_enabled; - device->offload_enabled = 0; return 0; } -/* Update an entry in h->dev[] array. */ +/* + * Called during a scan operation. + * + * Update an entry in h->dev[] array. + */ static void hpsa_scsi_update_entry(struct ctlr_info *h, int entry, struct hpsa_scsi_dev_t *new_entry) { - int offload_enabled; /* assumes h->devlock is held */ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); /* Raid level changed. */ h->dev[entry]->raid_level = new_entry->raid_level; + /* + * ioacccel_handle may have changed for a dual domain disk + */ + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + /* Raid offload parameters changed. Careful about the ordering. */ - if (new_entry->offload_config && new_entry->offload_enabled) { + if (new_entry->offload_config && new_entry->offload_to_be_enabled) { /* * if drive is newly offload_enabled, we want to copy the * raid map data first. If previously offload_enabled and * offload_config were set, raid map data had better be - * the same as it was before. if raid map data is changed + * the same as it was before. If raid map data has changed * then it had better be the case that * h->dev[entry]->offload_enabled is currently 0. */ h->dev[entry]->raid_map = new_entry->raid_map; h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; } - if (new_entry->hba_ioaccel_enabled) { + if (new_entry->offload_to_be_enabled) { h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ } @@ -1352,17 +1390,18 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, /* * We can turn off ioaccel offload now, but need to delay turning - * it on until we can update h->dev[entry]->phys_disk[], but we + * ioaccel on until we can update h->dev[entry]->phys_disk[], but we * can't do that until all the devices are updated. */ - h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; - if (!new_entry->offload_enabled) + h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled; + + /* + * turn ioaccel off immediately if told to do so. 
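With this change __enqueue_cmd_and_start_io() picks the reply queue from a per-CPU table, h->reply_map, instead of computing raw_smp_processor_id() modulo the queue count at each call site. A simplified sketch of filling such a table (a plain modulo spread; the real mapping may instead follow MSI-X IRQ affinity):

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/types.h>

static u32 *build_reply_map(unsigned int nqueues)
{
	u32 *map;
	int cpu;

	map = kcalloc(nr_cpu_ids, sizeof(*map), GFP_KERNEL);
	if (!map)
		return NULL;

	/* Spread submitting CPUs evenly across the reply queues */
	for_each_possible_cpu(cpu)
		map[cpu] = cpu % nqueues;

	return map;
}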
+ */ + if (!new_entry->offload_to_be_enabled) h->dev[entry]->offload_enabled = 0; - offload_enabled = h->dev[entry]->offload_enabled; - h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); - h->dev[entry]->offload_enabled = offload_enabled; } /* Replace an entry from h->dev[] array. */ @@ -1388,9 +1427,8 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; + hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); - new_entry->offload_to_be_enabled = new_entry->offload_enabled; - new_entry->offload_enabled = 0; } /* Remove an entry from h->dev[] array. */ @@ -1480,11 +1518,22 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1, return 1; if (dev1->offload_config != dev2->offload_config) return 1; - if (dev1->offload_enabled != dev2->offload_enabled) + if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled) return 1; if (!is_logical_dev_addr_mode(dev1->scsi3addr)) if (dev1->queue_depth != dev2->queue_depth) return 1; + /* + * This can happen for dual domain devices. An active + * path change causes the ioaccel handle to change + * + * for example note the handle differences between p0 and p1 + * Device WWN ,WWN hash,Handle + * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003 + * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004 + */ + if (dev1->ioaccel_handle != dev2->ioaccel_handle) + return 1; return 0; } @@ -1694,8 +1743,12 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, * be 0, but we'll turn it off here just in case */ if (!logical_drive->phys_disk[i]) { - logical_drive->offload_enabled = 0; - logical_drive->offload_to_be_enabled = 0; + dev_warn(&h->pdev->dev, + "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n", + __func__, + h->scsi_host->host_no, logical_drive->bus, + logical_drive->target, logical_drive->lun); + hpsa_turn_off_ioaccel_for_device(logical_drive); logical_drive->queue_depth = 8; } } @@ -1705,8 +1758,12 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, * way too high for partial stripe writes */ logical_drive->queue_depth = qdepth; - else - logical_drive->queue_depth = h->nr_cmds; + else { + if (logical_drive->external) + logical_drive->queue_depth = EXTERNAL_QD; + else + logical_drive->queue_depth = h->nr_cmds; + } } static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, @@ -1726,13 +1783,24 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, /* * If offload is currently enabled, the RAID map and * phys_disk[] assignment *better* not be changing - * and since it isn't changing, we do not need to - * update it. + * because we would be changing ioaccel phsy_disk[] pointers + * on a ioaccel volume processing I/O requests. + * + * If an ioaccel volume status changed, initially because it was + * re-configured and thus underwent a transformation, or + * a drive failed, we would have received a state change + * request and ioaccel should have been turned off. When the + * transformation completes, we get another state change + * request to turn ioaccel back on. In this case, we need + * to update the ioaccel information. + * + * Thus: If it is not currently enabled, but will be after + * the scan completes, make sure the ioaccel pointers + * are up to date. 
*/ - if (dev[i]->offload_enabled) - continue; - hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); + if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled) + hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); } } @@ -1778,23 +1846,33 @@ static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h, return count; } +#define NUM_WAIT 20 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) { int cmds = 0; int waits = 0; + int num_wait = NUM_WAIT; + + if (device->external) + num_wait = HPSA_EH_PTRAID_TIMEOUT; while (1) { cmds = hpsa_find_outstanding_commands_for_dev(h, device); if (cmds == 0) break; - if (++waits > 20) + if (++waits > num_wait) break; - dev_warn(&h->pdev->dev, - "%s: removing device with %d outstanding commands!\n", - __func__, cmds); msleep(1000); } + + if (waits > num_wait) { + dev_warn(&h->pdev->dev, + "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n", + __func__, + h->scsi_host->host_no, + device->bus, device->target, device->lun, cmds); + } } static void hpsa_remove_device(struct ctlr_info *h, @@ -1805,6 +1883,12 @@ static void hpsa_remove_device(struct ctlr_info *h, if (!h->scsi_host) return; + /* + * Allow for commands to drain + */ + device->removed = 1; + hpsa_wait_for_outstanding_commands_for_dev(h, device); + if (is_logical_device(device)) { /* RAID */ sdev = scsi_device_lookup(h->scsi_host, device->bus, device->target, device->lun); @@ -1822,9 +1906,6 @@ static void hpsa_remove_device(struct ctlr_info *h, } } else { /* HBA */ - device->removed = 1; - hpsa_wait_for_outstanding_commands_for_dev(h, device); - hpsa_remove_sas_device(device); } } @@ -1854,8 +1935,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, } spin_unlock_irqrestore(&h->reset_lock, flags); - added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); - removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); + added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL); + removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL); if (!added || !removed) { dev_warn(&h->pdev->dev, "out of memory in " @@ -1932,8 +2013,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, } hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); - /* Now that h->dev[]->phys_disk[] is coherent, we can enable + /* + * Now that h->dev[]->phys_disk[] is coherent, we can enable * any logical drives that need it enabled. + * + * The raid map should be current by now. + * + * We are updating the device list used for I/O requests. */ for (i = 0; i < h->ndevices; i++) { if (h->dev[i] == NULL) @@ -2014,7 +2100,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, return NULL; } -static int hpsa_slave_alloc(struct scsi_device *sdev) +static int hpsa_sdev_init(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd = NULL; unsigned long flags; @@ -2048,7 +2134,9 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) } /* configure scsi device based on internal per-device structure */ -static int hpsa_slave_configure(struct scsi_device *sdev) +#define CTLR_TIMEOUT (120 * HZ) +static int hpsa_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct hpsa_scsi_dev_t *sd; int queue_depth; @@ -2057,22 +2145,36 @@ static int hpsa_slave_configure(struct scsi_device *sdev) sdev->no_uld_attach = !sd || !sd->expose_device; if (sd) { - if (sd->external) + sd->was_removed = 0; + queue_depth = sd->queue_depth != 0 ? 
+ sd->queue_depth : sdev->host->can_queue; + if (sd->external) { queue_depth = EXTERNAL_QD; - else - queue_depth = sd->queue_depth != 0 ? - sd->queue_depth : sdev->host->can_queue; - } else + sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, + HPSA_EH_PTRAID_TIMEOUT); + } + if (is_hba_lunid(sd->scsi3addr)) { + sdev->eh_timeout = CTLR_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT); + } + } else { queue_depth = sdev->host->can_queue; + } scsi_change_queue_depth(sdev, queue_depth); return 0; } -static void hpsa_slave_destroy(struct scsi_device *sdev) +static void hpsa_sdev_destroy(struct scsi_device *sdev) { - /* nothing to do. */ + struct hpsa_scsi_dev_t *hdev = NULL; + + hdev = sdev->hostdata; + + if (hdev) + hdev->was_removed = 1; } static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) @@ -2097,14 +2199,15 @@ static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) return 0; h->ioaccel2_cmd_sg_list = - kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, + kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list), GFP_KERNEL); if (!h->ioaccel2_cmd_sg_list) return -ENOMEM; for (i = 0; i < h->nr_cmds; i++) { h->ioaccel2_cmd_sg_list[i] = - kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * - h->maxsgentries, GFP_KERNEL); + kmalloc_array(h->maxsgentries, + sizeof(*h->ioaccel2_cmd_sg_list[i]), + GFP_KERNEL); if (!h->ioaccel2_cmd_sg_list[i]) goto clean; } @@ -2136,14 +2239,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) if (h->chainsize <= 0) return 0; - h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, - GFP_KERNEL); + h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list), + GFP_KERNEL); if (!h->cmd_sg_list) return -ENOMEM; for (i = 0; i < h->nr_cmds; i++) { - h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * - h->chainsize, GFP_KERNEL); + h->cmd_sg_list[i] = kmalloc_array(h->chainsize, + sizeof(*h->cmd_sg_list[i]), + GFP_KERNEL); if (!h->cmd_sg_list[i]) goto clean; @@ -2164,8 +2268,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; chain_size = le32_to_cpu(cp->sg[0].length); - temp64 = pci_map_single(h->pdev, chain_block, chain_size, - PCI_DMA_TODEVICE); + temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size, + DMA_TO_DEVICE); if (dma_mapping_error(&h->pdev->dev, temp64)) { /* prevent subsequent unmapping */ cp->sg->address = 0; @@ -2185,7 +2289,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, chain_sg = cp->sg; temp64 = le64_to_cpu(chain_sg->address); chain_size = le32_to_cpu(cp->sg[0].length); - pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); + dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE); } static int hpsa_map_sg_chain_block(struct ctlr_info *h, @@ -2201,8 +2305,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h, chain_len = sizeof(*chain_sg) * (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); chain_sg->Len = cpu_to_le32(chain_len); - temp64 = pci_map_single(h->pdev, chain_block, chain_len, - PCI_DMA_TODEVICE); + temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len, + DMA_TO_DEVICE); if (dma_mapping_error(&h->pdev->dev, temp64)) { /* prevent subsequent unmapping */ chain_sg->Addr = cpu_to_le64(0); @@ -2221,8 +2325,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, return; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; - pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), - 
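The S/G chain-block allocations in this hunk switch from open-coded kzalloc(n * size) and kmalloc(n * size) to kcalloc() and kmalloc_array(), which fail cleanly if the multiplication would overflow. The pattern in isolation (the struct and function names are placeholders):

#include <linux/slab.h>
#include <linux/types.h>

struct sg_entry {
	__le64 addr;
	__le32 len;
};

static struct sg_entry **alloc_sg_tables(unsigned int nr, unsigned int entries)
{
	struct sg_entry **tbl;
	unsigned int i;

	/* kcalloc(): overflow-checked and zeroed */
	tbl = kcalloc(nr, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return NULL;

	for (i = 0; i < nr; i++) {
		/* kmalloc_array(): overflow-checked, not zeroed */
		tbl[i] = kmalloc_array(entries, sizeof(*tbl[i]), GFP_KERNEL);
		if (!tbl[i])
			goto unwind;
	}
	return tbl;

unwind:
	while (i--)
		kfree(tbl[i]);
	kfree(tbl);
	return NULL;
}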
le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); + dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr), + le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE); } @@ -2244,6 +2348,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_SERV_RESPONSE_COMPLETE: switch (c2->error_data.status) { case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + if (cmd) + cmd->result = 0; break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: cmd->result |= SAM_STAT_CHECK_CONDITION; @@ -2290,7 +2396,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, break; case IOACCEL2_STATUS_SR_UNDERRUN: cmd->result = (DID_OK << 16); /* host byte */ - cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ ioaccel2_resid = get_unaligned_le32( &c2->error_data.resid_cnt[0]); scsi_set_resid(cmd, ioaccel2_resid); @@ -2338,13 +2443,16 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, break; } + if (dev->in_reset) + retry = 0; + return retry; /* retry on raid path? */ } static void hpsa_cmd_resolve_events(struct ctlr_info *h, struct CommandList *c) { - bool do_wake = false; + struct hpsa_scsi_dev_t *dev = c->device; /* * Reset c->scsi_cmd here so that the reset handler will know @@ -2353,25 +2461,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h, */ c->scsi_cmd = SCSI_CMD_IDLE; mb(); /* Declare command idle before checking for pending events. */ - if (c->reset_pending) { - unsigned long flags; - struct hpsa_scsi_dev_t *dev; - - /* - * There appears to be a reset pending; lock the lock and - * reconfirm. If so, then decrement the count of outstanding - * commands and wake the reset command if this is the last one. - */ - spin_lock_irqsave(&h->lock, flags); - dev = c->reset_pending; /* Re-fetch under the lock. */ - if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) - do_wake = true; - c->reset_pending = NULL; - spin_unlock_irqrestore(&h->lock, flags); + if (dev) { + atomic_dec(&dev->commands_outstanding); + if (dev->in_reset && + atomic_read(&dev->commands_outstanding) <= 0) + wake_up_all(&h->event_sync_wait_queue); } - - if (do_wake) - wake_up_all(&h->event_sync_wait_queue); } static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, @@ -2385,8 +2480,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd) { hpsa_cmd_resolve_and_free(h, c); - if (cmd && cmd->scsi_done) - cmd->scsi_done(cmd); + if (cmd) + scsi_done(cmd); } static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) @@ -2403,12 +2498,14 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) + c2->error_data.status == 0)) { + cmd->result = 0; return hpsa_cmd_free_and_done(h, c, cmd); + } /* * Any RAID offload error results in retry which will use - * the normal I/O path so the controller can handle whatever's + * the normal I/O path so the controller can handle whatever is * wrong. 
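The chain-block mapping above is one instance of the patch-wide move from the legacy pci_map_single()/pci_unmap_single() wrappers and PCI_DMA_* constants to the generic DMA API. The shape of that conversion on a stand-alone buffer (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static dma_addr_t map_one_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return 0;	/* 0 doubles as a "never mapped" marker here */

	return handle;
}

static void unmap_one_buffer(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
	/* was: pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE) */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}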
*/ if (is_logical_device(dev) && @@ -2416,8 +2513,12 @@ static void process_ioaccel2_completion(struct ctlr_info *h, IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { - dev->offload_enabled = 0; - dev->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(dev); + } + + if (dev->in_reset) { + cmd->result = DID_RESET << 16; + return hpsa_cmd_free_and_done(h, c, cmd); } return hpsa_retry_cmd(h, c); @@ -2495,8 +2596,13 @@ static void complete_scsi_command(struct CommandList *cp) (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) hpsa_unmap_ioaccel2_sg_chain_block(h, c2); - cmd->result = (DID_OK << 16); /* host byte */ - cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ + cmd->result = (DID_OK << 16); /* host byte */ + + /* SCSI command has already been cleaned up in SML */ + if (dev->was_removed) { + hpsa_cmd_resolve_and_free(h, cp); + return; + } if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) { if (dev->physical_device && dev->expose_device && @@ -2519,10 +2625,6 @@ static void complete_scsi_command(struct CommandList *cp) return hpsa_cmd_free_and_done(h, cp, cmd); } - if ((unlikely(hpsa_is_pending_event(cp)))) - if (cp->reset_pending) - return hpsa_cmd_free_and_done(h, cp, cmd); - if (cp->cmd_type == CMD_IOACCEL2) return process_ioaccel2_completion(h, cp, cmd, dev); @@ -2560,10 +2662,8 @@ static void complete_scsi_command(struct CommandList *cp) case CMD_TARGET_STATUS: cmd->result |= ei->ScsiStatus; /* copy the sense data */ - if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) - sense_data_size = SCSI_SENSE_BUFFERSIZE; - else - sense_data_size = sizeof(ei->SenseInfo); + sense_data_size = min_t(unsigned long, SCSI_SENSE_BUFFERSIZE, + sizeof(ei->SenseInfo)); if (ei->SenseLen < sense_data_size) sense_data_size = ei->SenseLen; memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); @@ -2571,9 +2671,20 @@ static void complete_scsi_command(struct CommandList *cp) decode_sense_data(ei->SenseInfo, sense_data_size, &sense_key, &asc, &ascq); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { - if (sense_key == ABORTED_COMMAND) { + switch (sense_key) { + case ABORTED_COMMAND: cmd->result |= DID_SOFT_ERROR << 16; break; + case UNIT_ATTENTION: + if (asc == 0x3F && ascq == 0x0E) + h->drv_req_rescan = 1; + break; + case ILLEGAL_REQUEST: + if (asc == 0x25 && ascq == 0x00) { + dev->removed = 1; + cmd->result = DID_NO_CONNECT << 16; + } + break; } break; } @@ -2683,13 +2794,13 @@ static void complete_scsi_command(struct CommandList *cp) return hpsa_cmd_free_and_done(h, cp, cmd); } -static void hpsa_pci_unmap(struct pci_dev *pdev, - struct CommandList *c, int sg_used, int data_direction) +static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, + int sg_used, enum dma_data_direction data_direction) { int i; for (i = 0; i < sg_used; i++) - pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), + dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr), le32_to_cpu(c->SG[i].Len), data_direction); } @@ -2698,17 +2809,17 @@ static int hpsa_map_one(struct pci_dev *pdev, struct CommandList *cp, unsigned char *buf, size_t buflen, - int data_direction) + enum dma_data_direction data_direction) { u64 addr64; - if (buflen == 0 || data_direction == PCI_DMA_NONE) { + if (buflen == 0 || data_direction == DMA_NONE) { cp->Header.SGList = 0; cp->Header.SGTotal = cpu_to_le16(0); return 0; } - addr64 = pci_map_single(pdev, buf, buflen, data_direction); + addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction); if 
(dma_mapping_error(&pdev->dev, addr64)) { /* Prevent subsequent unmap of something never mapped */ cp->Header.SGList = 0; @@ -2769,7 +2880,8 @@ static u32 lockup_detected(struct ctlr_info *h) #define MAX_DRIVER_CMD_RETRIES 25 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, - struct CommandList *c, int data_direction, unsigned long timeout_msecs) + struct CommandList *c, enum dma_data_direction data_direction, + unsigned long timeout_msecs) { int backoff_time = 10, retry_count = 0; int rc; @@ -2880,6 +2992,57 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h, } } +static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr, + u8 page, u8 *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_alloc(h); + if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize, + page, scsi3addr, TYPE_CMD)) { + rc = -1; + goto out; + } + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); + if (rc) + goto out; + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_free(h, c); + return rc; +} + +static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h, + u8 *scsi3addr) +{ + u8 *buf; + u64 sa = 0; + int rc = 0; + + buf = kzalloc(1024, GFP_KERNEL); + if (!buf) + return 0; + + rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC, + buf, 1024); + + if (rc) + goto out; + + sa = get_unaligned_be64(buf+12); + +out: + kfree(buf); + return sa; +} + static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, u16 page, unsigned char *buf, unsigned char bufsize) @@ -2895,8 +3058,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, rc = -1; goto out; } - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -2909,7 +3072,7 @@ out: return rc; } -static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, +static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, u8 reset_type, int reply_queue) { int rc = IO_OK; @@ -2917,11 +3080,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, struct ErrorInfo *ei; c = cmd_alloc(h); - + c->device = dev; /* fill_cmd can't fail here, no data buffer to map. 
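hpsa_get_enclosure_logical_identifier() above extracts the identifier from the RECEIVE DIAGNOSTIC response with get_unaligned_be64(), the helper whose header this patch now pulls in as <linux/unaligned.h> instead of <asm/unaligned.h>. In isolation (offset 12 comes from the code above; the length check is an added illustration):

#include <linux/types.h>
#include <linux/unaligned.h>

static u64 read_logical_id(const u8 *resp, size_t len)
{
	/* Bytes 12..19 hold a big-endian 64-bit identifier */
	if (len < 20)
		return 0;

	return get_unaligned_be64(resp + 12);
}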
*/ - (void) fill_cmd(c, reset_type, h, NULL, 0, 0, - scsi3addr, TYPE_MSG); + (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG); rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); if (rc) { dev_warn(&h->pdev->dev, "Failed to send reset command\n"); @@ -2999,9 +3161,8 @@ static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, } static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, - unsigned char *scsi3addr, u8 reset_type, int reply_queue) + u8 reset_type, int reply_queue) { - int i; int rc = 0; /* We can really only handle one reset at a time */ @@ -3010,38 +3171,14 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, return -EINTR; } - BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); - - for (i = 0; i < h->nr_cmds; i++) { - struct CommandList *c = h->cmd_pool + i; - int refcount = atomic_inc_return(&c->refcount); - - if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { - unsigned long flags; - - /* - * Mark the target command as having a reset pending, - * then lock a lock so that the command cannot complete - * while we're considering it. If the command is not - * idle then count it; otherwise revoke the event. - */ - c->reset_pending = dev; - spin_lock_irqsave(&h->lock, flags); /* Implied MB */ - if (!hpsa_is_cmd_idle(c)) - atomic_inc(&dev->reset_cmds_out); - else - c->reset_pending = NULL; - spin_unlock_irqrestore(&h->lock, flags); - } - - cmd_free(h, c); - } - - rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); - if (!rc) + rc = hpsa_send_reset(h, dev, reset_type, reply_queue); + if (!rc) { + /* incremented by sending the reset request */ + atomic_dec(&dev->commands_outstanding); wait_event(h->event_sync_wait_queue, - atomic_read(&dev->reset_cmds_out) == 0 || + atomic_read(&dev->commands_outstanding) <= 0 || lockup_detected(h)); + } if (unlikely(lockup_detected(h))) { dev_warn(&h->pdev->dev, @@ -3049,10 +3186,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, rc = -ENODEV; } - if (unlikely(rc)) - atomic_set(&dev->reset_cmds_out, 0); - else - rc = wait_for_device_to_become_ready(h, scsi3addr, 0); + if (!rc) + rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0); mutex_unlock(&h->reset_mutex); return rc; @@ -3179,8 +3314,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h, cmd_free(h, c); return -1; } - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3222,8 +3357,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, c->Request.CDB[2] = bmic_device_index & 0xff; c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3250,8 +3385,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h, if (rc) goto out; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if (rc) goto out; ei = c->err_info; @@ -3281,8 +3416,8 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h, c->Request.CDB[2] = bmic_device_index & 0xff; c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, - 
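hpsa_do_reset() above no longer tags every in-flight command with reset_pending; it sends the reset and then blocks until the device's commands_outstanding counter drains (or a controller lockup is detected). Reduced to its core, that is a wait_event() on an atomic counter, roughly:

#include <linux/atomic.h>
#include <linux/wait.h>

/* Completion side: drop the count and wake any waiter (simplified) */
static void one_command_done(atomic_t *outstanding, wait_queue_head_t *wq)
{
	if (atomic_dec_return(outstanding) <= 0)
		wake_up_all(wq);
}

/* Reset side: block until every outstanding command has completed */
static void wait_for_drain(atomic_t *outstanding, wait_queue_head_t *wq)
{
	wait_event(*wq, atomic_read(outstanding) <= 0);
}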
DEFAULT_TIMEOUT); + hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -3310,9 +3445,17 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, struct ErrorInfo *ei = NULL; struct bmic_sense_storage_box_params *bssbp = NULL; struct bmic_identify_physical_device *id_phys = NULL; - struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; + struct ext_report_lun_entry *rle; u16 bmic_device_index = 0; + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) + return; + + rle = &rlep->LUN[rle_index]; + + encl_dev->eli = + hpsa_get_enclosure_logical_identifier(h, scsi3addr); + bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); if (encl_dev->target == -1 || encl_dev->lun == -1) { @@ -3354,8 +3497,8 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h, else c->Request.CDB[5] = 0; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, - DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if (rc) goto out; @@ -3379,7 +3522,7 @@ out: if (rc != IO_OK) hpsa_show_dev_msg(KERN_INFO, h, encl_dev, - "Error, could not get enclosure information\n"); + "Error, could not get enclosure information"); } static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, @@ -3439,6 +3582,30 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, dev->sas_address = sa; } +static void hpsa_ext_ctrl_present(struct ctlr_info *h, + struct ReportExtendedLUNdata *physdev) +{ + u32 nphysicals; + int i; + + if (h->discovery_polling) + return; + + nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1; + + for (i = 0; i < nphysicals; i++) { + if (physdev->LUN[i].device_type == + BMIC_DEVICE_TYPE_CONTROLLER + && !is_hba_lunid(physdev->LUN[i].lunid)) { + dev_info(&h->pdev->dev, + "External controller present, activate discovery polling and disable rld caching\n"); + hpsa_disable_rld_caching(h); + h->discovery_polling = 1; + break; + } + } +} + /* Get a device id from inquiry page 0x83 */ static bool hpsa_vpd_page_supported(struct ctlr_info *h, unsigned char scsi3addr[], u8 page) @@ -3459,10 +3626,7 @@ static bool hpsa_vpd_page_supported(struct ctlr_info *h, if (rc != 0) goto exit_unsupported; pages = buf[3]; - if ((pages + HPSA_VPD_HEADER_SZ) <= 255) - bufsize = pages + HPSA_VPD_HEADER_SZ; - else - bufsize = 255; + bufsize = min(pages + HPSA_VPD_HEADER_SZ, 255); /* Get the whole VPD page list */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, @@ -3483,6 +3647,13 @@ exit_supported: return true; } +/* + * Called during a scan operation. + * Sets ioaccel status on the new device list, not the existing device list + * + * The device list used during I/O will be updated later in + * adjust_hpsa_scsi_table. + */ static void hpsa_get_ioaccel_status(struct ctlr_info *h, unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) { @@ -3511,12 +3682,19 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (this_device->offload_config) { - this_device->offload_enabled = + bool offload_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); - if (hpsa_get_raid_map(h, scsi3addr, this_device)) - this_device->offload_enabled = 0; + /* + * Check to see if offload can be enabled. 
+ */ + if (offload_enabled) { + rc = hpsa_get_raid_map(h, scsi3addr, this_device); + if (rc) /* could not load raid_map */ + goto out; + this_device->offload_to_be_enabled = 1; + } } - this_device->offload_to_be_enabled = this_device->offload_enabled; + out: kfree(buf); return; @@ -3565,29 +3743,32 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, memset(scsi3addr, 0, sizeof(scsi3addr)); if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, buf, bufsize, 0, scsi3addr, TYPE_CMD)) { - rc = -1; + rc = -EAGAIN; goto out; } if (extended_response) c->Request.CDB[1] = extended_response; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if (rc) goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); - rc = -1; + rc = -EIO; } else { struct ReportLUNdata *rld = buf; if (rld->extended_response_flag != extended_response) { - dev_err(&h->pdev->dev, - "report luns requested format %u, got %u\n", - extended_response, - rld->extended_response_flag); - rc = -1; + if (!h->legacy_board) { + dev_err(&h->pdev->dev, + "report luns requested format %u, got %u\n", + extended_response, + rld->extended_response_flag); + rc = -EINVAL; + } else + rc = -EOPNOTSUPP; } } out: @@ -3603,7 +3784,7 @@ static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize, HPSA_REPORT_PHYS_EXTENDED); - if (!rc || !hpsa_allow_any) + if (!rc || rc != -EOPNOTSUPP) return rc; /* REPORT PHYS EXTENDED is not supported */ @@ -3693,8 +3874,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h, u8 sense_key, asc, ascq; int sense_len; int rc, ldstat = 0; - u16 cmd_status; - u8 scsi_status; #define ASC_LUN_NOT_READY 0x04 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 @@ -3703,7 +3882,7 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h, (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, - DEFAULT_TIMEOUT); + NO_TIMEOUT); if (rc) { cmd_free(h, c); return HPSA_VPD_LV_STATUS_UNSUPPORTED; @@ -3714,8 +3893,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h, else sense_len = c->err_info->SenseLen; decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); - cmd_status = c->err_info->CommandStatus; - scsi_status = c->err_info->ScsiStatus; cmd_free(h, c); /* Determine the reason for not ready state */ @@ -3791,14 +3968,18 @@ static int hpsa_update_device_info(struct ctlr_info *h, memset(this_device->device_id, 0, sizeof(this_device->device_id)); if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, - sizeof(this_device->device_id))) + sizeof(this_device->device_id)) < 0) { dev_err(&h->pdev->dev, - "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n", + "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n", h->ctlr, __func__, h->scsi_host->host_no, - this_device->target, this_device->lun, + this_device->bus, this_device->target, + this_device->lun, scsi_device_type(this_device->devtype), this_device->model); + rc = HPSA_LV_FAILED; + goto bail_out; + } if ((this_device->devtype == TYPE_DISK || this_device->devtype == TYPE_ZBC) && @@ -3809,6 +3990,16 @@ static int hpsa_update_device_info(struct ctlr_info *h, if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 
hpsa_get_ioaccel_status(h, scsi3addr, this_device); volume_offline = hpsa_volume_offline(h, scsi3addr); + if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED && + h->legacy_board) { + /* + * Legacy boards might not support volume status + */ + dev_info(&h->pdev->dev, + "C0:T%d:L%d Volume status not available, assuming online.\n", + this_device->target, this_device->lun); + volume_offline = 0; + } this_device->volume_offline = volume_offline; if (volume_offline == HPSA_LV_FAILED) { rc = HPSA_LV_FAILED; @@ -3820,8 +4011,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; - this_device->offload_enabled = 0; - this_device->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(this_device); this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; @@ -3936,7 +4126,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, "maximum logical LUNs (%d) exceeded. " "%d LUNs ignored.\n", HPSA_MAX_LUN, *nlogicals - HPSA_MAX_LUN); - *nlogicals = HPSA_MAX_LUN; + *nlogicals = HPSA_MAX_LUN; } if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { dev_warn(&h->pdev->dev, @@ -3984,6 +4174,9 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, int rc; struct ext_report_lun_entry *rle; + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) + return; + rle = &rlep->LUN[rle_index]; dev->ioaccel_handle = rle->ioaccel_handle; @@ -4008,7 +4201,12 @@ static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, struct ReportExtendedLUNdata *rlep, int rle_index, struct bmic_identify_physical_device *id_phys) { - struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; + struct ext_report_lun_entry *rle; + + if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN) + return; + + rle = &rlep->LUN[rle_index]; if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) this_device->hba_ioaccel_enabled = 1; @@ -4045,7 +4243,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h, memset(id_ctlr, 0, sizeof(*id_ctlr)); rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); if (!rc) - if (id_ctlr->configured_logical_drive_count < 256) + if (id_ctlr->configured_logical_drive_count < 255) *nlocals = id_ctlr->configured_logical_drive_count; else *nlocals = le16_to_cpu( @@ -4142,12 +4340,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) u32 ndev_allocated = 0; struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; int ncurrent = 0; - int i, n_ext_target_devs, ndevs_to_allocate; + int i, ndevs_to_allocate; int raid_ctlr_position; bool physical_device; - DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); - currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); + currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL); physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); @@ -4159,7 +4356,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) dev_err(&h->pdev->dev, "out of memory\n"); goto out; } - memset(lunzerobits, 0, sizeof(lunzerobits)); h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. 
*/ @@ -4182,6 +4378,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) */ ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1; + hpsa_ext_ctrl_present(h, physdev_list); + /* Allocate the per device structures */ for (i = 0; i < ndevs_to_allocate; i++) { if (i >= HPSA_MAX_DEVICES) { @@ -4205,13 +4403,14 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) raid_ctlr_position = nphysicals + nlogicals; /* adjust our table of devices */ - n_ext_target_devs = 0; for (i = 0; i < nphysicals + nlogicals + 1; i++) { u8 *lunaddrbytes, is_OBDR = 0; int rc = 0; int phys_dev_index = i - (raid_ctlr_position == 0); bool skip_device = false; + memset(tmpdevice, 0, sizeof(*tmpdevice)); + physical_device = i < nphysicals + (raid_ctlr_position == 0); /* Figure out where the LUN ID info is coming from */ @@ -4226,14 +4425,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) /* * Skip over some devices such as a spare. */ - if (!tmpdevice->external && physical_device) { + if (phys_dev_index >= 0 && !tmpdevice->external && + physical_device) { skip_device = hpsa_skip_device(h, lunaddrbytes, &physdev_list->LUN[phys_dev_index]); if (skip_device) continue; } - /* Get device type, vendor, model, device id */ + /* Get device type, vendor, model, device id, raid_map */ rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice, &is_OBDR); if (rc == -ENOMEM) { @@ -4250,18 +4450,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) figure_bus_target_lun(h, lunaddrbytes, tmpdevice); this_device = currentsd[ncurrent]; - /* Turn on discovery_polling if there are ext target devices. - * Event-based change notification is unreliable for those. - */ - if (!h->discovery_polling) { - if (tmpdevice->external) { - h->discovery_polling = 1; - dev_info(&h->pdev->dev, - "External target, activate discovery polling.\n"); - } - } - - *this_device = *tmpdevice; this_device->physical_device = physical_device; @@ -4378,7 +4566,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, struct scsi_cmnd *cmd) { struct scatterlist *sg; - int use_sg, i, sg_limit, chained, last_sg; + int use_sg, i, sg_limit, chained; struct SGDescriptor *curr_sg; BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); @@ -4400,7 +4588,6 @@ static int hpsa_scatter_gather(struct ctlr_info *h, curr_sg = cp->SG; chained = use_sg > h->max_cmd_sg_entries; sg_limit = chained ? 
h->max_cmd_sg_entries - 1 : use_sg; - last_sg = scsi_sg_count(cmd) - 1; scsi_for_each_sg(cmd, sg, sg_limit, i) { hpsa_set_sg_descriptor(curr_sg, sg); curr_sg++; @@ -4444,21 +4631,13 @@ sglist_finished: return 0; } -#define BUFLEN 128 static inline void warn_zero_length_transfer(struct ctlr_info *h, u8 *cdb, int cdb_len, const char *func) { - char buf[BUFLEN]; - int outlen; - int i; - - outlen = scnprintf(buf, BUFLEN, - "%s: Blocking zero-length request: CDB:", func); - for (i = 0; i < cdb_len; i++) - outlen += scnprintf(buf+outlen, BUFLEN - outlen, - "%02hhx", cdb[i]); - dev_warn(&h->pdev->dev, "%s\n", buf); + dev_warn(&h->pdev->dev, + "%s: Blocking zero-length request: CDB:%*phN\n", + func, cdb_len, cdb); } #define IO_ACCEL_INELIGIBLE 1 @@ -4504,6 +4683,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) case WRITE_6: case WRITE_12: is_write = 1; + fallthrough; case READ_6: case READ_12: if (*cdb_len == 6) { @@ -4645,6 +4825,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, c->phys_disk = dev; + if (dev->in_reset) + return -1; + return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); } @@ -4765,7 +4948,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, curr_sg->reserved[0] = 0; curr_sg->reserved[1] = 0; curr_sg->reserved[2] = 0; - curr_sg->chain_indicator = 0x80; + curr_sg->chain_indicator = IOACCEL2_CHAIN; curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; } @@ -4782,6 +4965,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, curr_sg++; } + /* + * Set the last s/g element bit + */ + (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG; + switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: cp->direction &= ~IOACCEL2_DIRECTION_MASK; @@ -4830,6 +5018,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, } else cp->sg_count = (u8) use_sg; + if (phys_disk->in_reset) { + cmd->result = DID_RESET << 16; + return -1; + } + enqueue_cmd_and_start_io(h, c); return 0; } @@ -4847,6 +5040,9 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, if (!c->scsi_cmd->device->hostdata) return -1; + if (phys_disk->in_reset) + return -1; + /* Try to honor the device's queue depth */ if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > phys_disk->queue_depth) { @@ -4930,10 +5126,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, if (!dev) return -1; + if (dev->in_reset) + return -1; + /* check for valid opcode, get LBA and block count */ switch (cmd->cmnd[0]) { case WRITE_6: is_write = 1; + fallthrough; case READ_6: first_block = (((cmd->cmnd[1] & 0x1F) << 16) | (cmd->cmnd[2] << 8) | @@ -4944,6 +5144,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_10: is_write = 1; + fallthrough; case READ_10: first_block = (((u64) cmd->cmnd[2]) << 24) | @@ -4956,6 +5157,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_12: is_write = 1; + fallthrough; case READ_12: first_block = (((u64) cmd->cmnd[2]) << 24) | @@ -4970,6 +5172,7 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, break; case WRITE_16: is_write = 1; + fallthrough; case READ_16: first_block = (((u64) cmd->cmnd[2]) << 56) | @@ -5046,8 +5249,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, /* Handles load balance across RAID 1 members. * (2-drive R1 and R10 with even # of drives.) * Appropriate for SSDs, not optimal for HDDs + * Ensure we have the correct raid_map. 
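warn_zero_length_transfer() above replaces the hand-rolled hex loop and stack buffer with printk's %*phN extension, which emits a buffer as contiguous hex bytes. A stand-alone use of the specifier:

#include <linux/device.h>
#include <linux/types.h>

static void dump_cdb(struct device *dev, const u8 *cdb, int cdb_len)
{
	/* %*phN prints cdb_len bytes of cdb as "aabbcc..." (up to 64 bytes) */
	dev_warn(dev, "Blocking zero-length request: CDB:%*phN\n",
		 cdb_len, cdb);
}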
*/ - BUG_ON(le16_to_cpu(map->layout_map_count) != 2); + if (le16_to_cpu(map->layout_map_count) != 2) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } if (dev->offload_to_mirror) map_index += le16_to_cpu(map->data_disks_per_row); dev->offload_to_mirror = !dev->offload_to_mirror; @@ -5055,8 +5262,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, case HPSA_RAID_ADM: /* Handles N-way mirrors (R1-ADM) * and R10 with # of drives divisible by 3.) + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 3); + if (le16_to_cpu(map->layout_map_count) != 3) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } offload_to_mirror = dev->offload_to_mirror; raid_map_helper(map, offload_to_mirror, @@ -5081,7 +5292,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, r5or6_blocks_per_row = le16_to_cpu(map->strip_size) * le16_to_cpu(map->data_disks_per_row); - BUG_ON(r5or6_blocks_per_row == 0); + if (r5or6_blocks_per_row == 0) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } stripesize = r5or6_blocks_per_row * le16_to_cpu(map->layout_map_count); #if BITS_PER_LONG == 32 @@ -5230,13 +5444,13 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, */ static int hpsa_ciss_submit(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, - unsigned char scsi3addr[]) + struct hpsa_scsi_dev_t *dev) { cmd->host_scribble = (unsigned char *) c; c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; c->Header.ReplyQueue = 0; /* unused in simple mode */ - memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); + memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8); c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); /* Fill in the request block... */ @@ -5287,6 +5501,14 @@ static int hpsa_ciss_submit(struct ctlr_info *h, hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } + + if (dev->in_reset) { + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + + c->device = dev; + enqueue_cmd_and_start_io(h, c); /* the cmd'll come back via intr handler in complete_scsi_command() */ return 0; @@ -5339,7 +5561,7 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, static int hpsa_ioaccel_submit(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, - unsigned char *scsi3addr) + bool retry) { struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; int rc = IO_ACCEL_INELIGIBLE; @@ -5347,19 +5569,31 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h, if (!dev) return SCSI_MLQUEUE_HOST_BUSY; + if (dev->in_reset) + return SCSI_MLQUEUE_HOST_BUSY; + + if (hpsa_simple_mode) + return IO_ACCEL_INELIGIBLE; + cmd->host_scribble = (unsigned char *) c; if (dev->offload_enabled) { - hpsa_cmd_init(h, c->cmdindex, c); + hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + c->device = dev; + if (retry) /* Resubmit but do not increment device->commands_outstanding. */ + c->retry_pending = true; rc = hpsa_scsi_ioaccel_raid_map(h, c); if (rc < 0) /* scsi_dma_map failed. */ rc = SCSI_MLQUEUE_HOST_BUSY; } else if (dev->hba_ioaccel_enabled) { - hpsa_cmd_init(h, c->cmdindex, c); + hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */ c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + c->device = dev; + if (retry) /* Resubmit but do not increment device->commands_outstanding. 
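The RAID-1/RAID-ADM/RAID-5 hunks above downgrade BUG_ON() on an inconsistent layout_map_count or stripe geometry to disabling ioaccel for that volume and returning IO_ACCEL_INELIGIBLE, so the request simply falls back to the RAID path. The general shape of that defensive check (the types here are stand-ins, not the driver's):

#include <linux/types.h>

#define IO_ACCEL_INELIGIBLE 1

struct example_map    { u16 layout_map_count; };
struct example_volume { bool ioaccel_enabled; struct example_map map; };

static int check_mirror_layout(struct example_volume *vol)
{
	/* Bad map data: drop to the RAID path instead of crashing */
	if (vol->map.layout_map_count != 2) {
		vol->ioaccel_enabled = false;
		return IO_ACCEL_INELIGIBLE;
	}
	return 0;
}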
*/ + c->retry_pending = true; rc = hpsa_scsi_ioaccel_direct_map(h, c); if (rc < 0) /* scsi_dma_map failed. */ rc = SCSI_MLQUEUE_HOST_BUSY; @@ -5379,8 +5613,12 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) cmd->result = DID_NO_CONNECT << 16; return hpsa_cmd_free_and_done(c->h, c, cmd); } - if (c->reset_pending) + + if (dev->in_reset) { + cmd->result = DID_RESET << 16; return hpsa_cmd_free_and_done(c->h, c, cmd); + } + if (c->cmd_type == CMD_IOACCEL2) { struct ctlr_info *h = c->h; struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; @@ -5388,7 +5626,8 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) if (c2->error_data.serv_response == IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { - rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); + /* Resubmit with the retry_pending flag set. */ + rc = hpsa_ioaccel_submit(h, c, cmd, true); if (rc == 0) return; if (rc == SCSI_MLQUEUE_HOST_BUSY) { @@ -5404,7 +5643,16 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) } } hpsa_cmd_partial_init(c->h, c->cmdindex, c); - if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { + /* + * Here we have not come in though queue_command, so we + * can set the retry_pending flag to true for a driver initiated + * retry attempt (I.E. not a SML retry). + * I.E. We are submitting a driver initiated retry. + * Note: hpsa_ciss_submit does not zero out the command fields like + * ioaccel submit does. + */ + c->retry_pending = true; + if (hpsa_ciss_submit(c->h, c, cmd, dev)) { /* * If we get here, it means dma mapping failed. Try * again via scsi mid layer, which will then get @@ -5414,7 +5662,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work) * if it encountered a dma mapping failure. */ cmd->result = DID_IMM_RETRY << 16; - cmd->scsi_done(cmd); + scsi_done(cmd); } } @@ -5423,45 +5671,59 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; - unsigned char scsi3addr[8]; struct CommandList *c; int rc = 0; /* Get the ptr to our adapter structure out of cmd->host. */ h = sdev_to_hba(cmd->device); - BUG_ON(cmd->request->tag < 0); + BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0); dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; - cmd->scsi_done(cmd); + scsi_done(cmd); return 0; } if (dev->removed) { cmd->result = DID_NO_CONNECT << 16; - cmd->scsi_done(cmd); + scsi_done(cmd); return 0; } - memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); - if (unlikely(lockup_detected(h))) { cmd->result = DID_NO_CONNECT << 16; - cmd->scsi_done(cmd); + scsi_done(cmd); return 0; } + + if (dev->in_reset) + return SCSI_MLQUEUE_DEVICE_BUSY; + c = cmd_tagged_alloc(h, cmd); + if (c == NULL) + return SCSI_MLQUEUE_DEVICE_BUSY; + + /* + * This is necessary because the SML doesn't zero out this field during + * error recovery. + */ + cmd->result = 0; /* * Call alternate submit routine for I/O accelerated commands. * Retries always go down the normal I/O path. + * Note: If cmd->retries is non-zero, then this is a SML + * initiated retry and not a driver initiated retry. + * This command has been obtained from cmd_tagged_alloc + * and is therefore a brand-new command. */ if (likely(cmd->retries == 0 && - !blk_rq_is_passthrough(cmd->request) && + !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) && h->acciopath_status)) { - rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); + /* Submit with the retry_pending flag unset. 
*/ + rc = hpsa_ioaccel_submit(h, c, cmd, false); if (rc == 0) return 0; if (rc == SCSI_MLQUEUE_HOST_BUSY) { @@ -5469,7 +5731,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) return SCSI_MLQUEUE_HOST_BUSY; } } - return hpsa_ciss_submit(h, c, cmd, scsi3addr); + return hpsa_ciss_submit(h, c, cmd, dev); } static void hpsa_scan_complete(struct ctlr_info *h) @@ -5577,7 +5839,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; @@ -5623,7 +5885,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h) */ static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) { - int idx = scmd->request->tag; + int idx = scsi_cmd_to_rq(scmd)->tag; if (idx < 0) return idx; @@ -5645,7 +5907,7 @@ static int hpsa_send_test_unit_ready(struct ctlr_info *h, /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); if (rc) return rc; /* no unmap needed here because no data xfer. */ @@ -5751,8 +6013,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) { int rc = SUCCESS; + int i; struct ctlr_info *h; - struct hpsa_scsi_dev_t *dev; + struct hpsa_scsi_dev_t *dev = NULL; u8 reset_type; char msg[48]; unsigned long flags; @@ -5818,9 +6081,19 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + /* + * wait to see if any commands will complete before sending reset + */ + dev->in_reset = true; /* block any new cmds from OS for this device */ + for (i = 0; i < 10; i++) { + if (atomic_read(&dev->commands_outstanding) > 0) + msleep(1000); + else + break; + } + /* send a reset to the SCSI LUN which the command was sent to */ - rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, - DEFAULT_REPLY_QUEUE); + rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE); if (rc == 0) rc = SUCCESS; else @@ -5834,6 +6107,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) return_reset_status: spin_lock_irqsave(&h->reset_lock, flags); h->reset_in_progress = 0; + if (dev) + dev->in_reset = false; spin_unlock_irqrestore(&h->reset_lock, flags); return rc; } @@ -5843,6 +6118,7 @@ return_reset_status: * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the * block request tag as an index into a table of entries. cmd_tagged_free() is * the complement, although cmd_free() may be called instead. + * This function is only called for new requests from queue_command. */ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, struct scsi_cmnd *scmd) @@ -5859,7 +6135,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, BUG(); } - atomic_inc(&c->refcount); if (unlikely(!hpsa_is_cmd_idle(c))) { /* * We expect that the SCSI layer will hand us a unique tag @@ -5867,15 +6142,25 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, * two requests...because if the selected command isn't idle * then someone is going to be very disappointed. 
*/ - dev_err(&h->pdev->dev, - "tag collision (tag=%d) in cmd_tagged_alloc().\n", - idx); - if (c->scsi_cmd != NULL) - scsi_print_command(c->scsi_cmd); - scsi_print_command(scmd); + if (idx != h->last_collision_tag) { /* Print once per tag */ + dev_warn(&h->pdev->dev, + "%s: tag collision (tag=%d)\n", __func__, idx); + if (scmd) + scsi_print_command(scmd); + h->last_collision_tag = idx; + } + return NULL; } + atomic_inc(&c->refcount); hpsa_cmd_partial_init(h, idx, c); + + /* + * This is a new command obtained from queue_command so + * there have not been any driver initiated retry attempts. + */ + c->retry_pending = false; + return c; } @@ -5937,11 +6222,18 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) offset = (i + 1) % HPSA_NRESERVED_CMDS; continue; } - set_bit(i & (BITS_PER_LONG - 1), - h->cmd_pool_bits + (i / BITS_PER_LONG)); + set_bit(i, h->cmd_pool_bits); break; /* it's ours now. */ } hpsa_cmd_partial_init(h, i, c); + c->device = NULL; + + /* + * cmd_alloc is for "internal" commands and they are never + * retried. + */ + c->retry_pending = false; + return c; } @@ -5957,88 +6249,76 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c) int i; i = c - h->cmd_pool; - clear_bit(i & (BITS_PER_LONG - 1), - h->cmd_pool_bits + (i / BITS_PER_LONG)); + clear_bit(i, h->cmd_pool_bits); } } #ifdef CONFIG_COMPAT -static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, +static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { - IOCTL32_Command_struct __user *arg32 = - (IOCTL32_Command_struct __user *) arg; + struct ctlr_info *h = sdev_to_hba(dev); + IOCTL32_Command_struct __user *arg32 = arg; IOCTL_Command_struct arg64; - IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; - memset(&arg64, 0, sizeof(arg64)); - err = 0; - err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, - sizeof(arg64.LUN_info)); - err |= copy_from_user(&arg64.Request, &arg32->Request, - sizeof(arg64.Request)); - err |= copy_from_user(&arg64.error_info, &arg32->error_info, - sizeof(arg64.error_info)); - err |= get_user(arg64.buf_size, &arg32->buf_size); - err |= get_user(cp, &arg32->buf); - arg64.buf = compat_ptr(cp); - err |= copy_to_user(p, &arg64, sizeof(arg64)); + if (!arg) + return -EINVAL; - if (err) + memset(&arg64, 0, sizeof(arg64)); + if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf))) + return -EFAULT; + if (get_user(cp, &arg32->buf)) return -EFAULT; + arg64.buf = compat_ptr(cp); - err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + err = hpsa_passthru_ioctl(h, &arg64); + atomic_inc(&h->passthru_cmds_avail); if (err) return err; - err |= copy_in_user(&arg32->error_info, &p->error_info, - sizeof(arg32->error_info)); - if (err) + if (copy_to_user(&arg32->error_info, &arg64.error_info, + sizeof(arg32->error_info))) return -EFAULT; - return err; + return 0; } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, - int cmd, void __user *arg) + unsigned int cmd, void __user *arg) { - BIG_IOCTL32_Command_struct __user *arg32 = - (BIG_IOCTL32_Command_struct __user *) arg; + struct ctlr_info *h = sdev_to_hba(dev); + BIG_IOCTL32_Command_struct __user *arg32 = arg; BIG_IOCTL_Command_struct arg64; - BIG_IOCTL_Command_struct __user *p = - compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; + if (!arg) + return -EINVAL; memset(&arg64, 0, sizeof(arg64)); - err = 0; - err |= copy_from_user(&arg64.LUN_info, 
&arg32->LUN_info, - sizeof(arg64.LUN_info)); - err |= copy_from_user(&arg64.Request, &arg32->Request, - sizeof(arg64.Request)); - err |= copy_from_user(&arg64.error_info, &arg32->error_info, - sizeof(arg64.error_info)); - err |= get_user(arg64.buf_size, &arg32->buf_size); - err |= get_user(arg64.malloc_size, &arg32->malloc_size); - err |= get_user(cp, &arg32->buf); - arg64.buf = compat_ptr(cp); - err |= copy_to_user(p, &arg64, sizeof(arg64)); - - if (err) + if (copy_from_user(&arg64, arg32, + offsetof(BIG_IOCTL32_Command_struct, buf))) return -EFAULT; + if (get_user(cp, &arg32->buf)) + return -EFAULT; + arg64.buf = compat_ptr(cp); - err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); + if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) + return -EAGAIN; + err = hpsa_big_passthru_ioctl(h, &arg64); + atomic_inc(&h->passthru_cmds_avail); if (err) return err; - err |= copy_in_user(&arg32->error_info, &p->error_info, - sizeof(arg32->error_info)); - if (err) + if (copy_to_user(&arg32->error_info, &arg64.error_info, + sizeof(arg32->error_info))) return -EFAULT; - return err; + return 0; } -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { switch (cmd) { case CCISS_GETPCIINFO: @@ -6107,37 +6387,29 @@ static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) return 0; } -static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) +static int hpsa_passthru_ioctl(struct ctlr_info *h, + IOCTL_Command_struct *iocommand) { - IOCTL_Command_struct iocommand; struct CommandList *c; char *buff = NULL; u64 temp64; int rc = 0; - if (!argp) - return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - if (copy_from_user(&iocommand, argp, sizeof(iocommand))) - return -EFAULT; - if ((iocommand.buf_size < 1) && - (iocommand.Request.Type.Direction != XFER_NONE)) { + if ((iocommand->buf_size < 1) && + (iocommand->Request.Type.Direction != XFER_NONE)) { return -EINVAL; } - if (iocommand.buf_size > 0) { - buff = kmalloc(iocommand.buf_size, GFP_KERNEL); - if (buff == NULL) - return -ENOMEM; - if (iocommand.Request.Type.Direction & XFER_WRITE) { - /* Copy the data into the buffer we created */ - if (copy_from_user(buff, iocommand.buf, - iocommand.buf_size)) { - rc = -EFAULT; - goto out_kfree; - } + if (iocommand->buf_size > 0) { + if (iocommand->Request.Type.Direction & XFER_WRITE) { + buff = memdup_user(iocommand->buf, iocommand->buf_size); + if (IS_ERR(buff)) + return PTR_ERR(buff); } else { - memset(buff, 0, iocommand.buf_size); + buff = kzalloc(iocommand->buf_size, GFP_KERNEL); + if (!buff) + return -ENOMEM; } } c = cmd_alloc(h); @@ -6147,23 +6419,23 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->scsi_cmd = SCSI_CMD_BUSY; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ - if (iocommand.buf_size > 0) { /* buffer to fill */ + if (iocommand->buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; c->Header.SGTotal = cpu_to_le16(1); } else { /* no buffers to fill */ c->Header.SGList = 0; c->Header.SGTotal = cpu_to_le16(0); } - memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); + memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN)); /* Fill in Request block */ - memcpy(&c->Request, &iocommand.Request, + memcpy(&c->Request, &iocommand->Request, sizeof(c->Request)); /* Fill in the scatter gather information */ - if (iocommand.buf_size > 0) { - temp64 = 
pci_map_single(h->pdev, buff, - iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); + if (iocommand->buf_size > 0) { + temp64 = dma_map_single(&h->pdev->dev, buff, + iocommand->buf_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { c->SG[0].Addr = cpu_to_le64(0); c->SG[0].Len = cpu_to_le32(0); @@ -6171,13 +6443,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) goto out; } c->SG[0].Addr = cpu_to_le64(temp64); - c->SG[0].Len = cpu_to_le32(iocommand.buf_size); + c->SG[0].Len = cpu_to_le32(iocommand->buf_size); c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); - if (iocommand.buf_size > 0) - hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); + if (iocommand->buf_size > 0) + hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); if (rc) { rc = -EIO; @@ -6185,30 +6457,25 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) } /* Copy the error information out */ - memcpy(&iocommand.error_info, c->err_info, - sizeof(iocommand.error_info)); - if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { - rc = -EFAULT; - goto out; - } - if ((iocommand.Request.Type.Direction & XFER_READ) && - iocommand.buf_size > 0) { + memcpy(&iocommand->error_info, c->err_info, + sizeof(iocommand->error_info)); + if ((iocommand->Request.Type.Direction & XFER_READ) && + iocommand->buf_size > 0) { /* Copy the data out of the buffer we created */ - if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { + if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) { rc = -EFAULT; goto out; } } out: cmd_free(h, c); -out_kfree: kfree(buff); return rc; } -static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) +static int hpsa_big_passthru_ioctl(struct ctlr_info *h, + BIG_IOCTL_Command_struct *ioc) { - BIG_IOCTL_Command_struct *ioc; struct CommandList *c; unsigned char **buff = NULL; int *buff_size = NULL; @@ -6219,39 +6486,23 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) u32 sz; BYTE __user *data_ptr; - if (!argp) - return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); - if (!ioc) { - status = -ENOMEM; - goto cleanup1; - } - if (copy_from_user(ioc, argp, sizeof(*ioc))) { - status = -EFAULT; - goto cleanup1; - } + if ((ioc->buf_size < 1) && - (ioc->Request.Type.Direction != XFER_NONE)) { - status = -EINVAL; - goto cleanup1; - } + (ioc->Request.Type.Direction != XFER_NONE)) + return -EINVAL; /* Check kmalloc limits using all SGs */ - if (ioc->malloc_size > MAX_KMALLOC_SIZE) { - status = -EINVAL; - goto cleanup1; - } - if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { - status = -EINVAL; - goto cleanup1; - } - buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); + if (ioc->malloc_size > MAX_KMALLOC_SIZE) + return -EINVAL; + if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) + return -EINVAL; + buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } - buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); + buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL); if (!buff_size) { status = -ENOMEM; goto cleanup1; @@ -6261,18 +6512,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) while (left) { sz = (left > ioc->malloc_size) ? 
ioc->malloc_size : left; buff_size[sg_used] = sz; - buff[sg_used] = kmalloc(sz, GFP_KERNEL); - if (buff[sg_used] == NULL) { - status = -ENOMEM; - goto cleanup1; - } + if (ioc->Request.Type.Direction & XFER_WRITE) { - if (copy_from_user(buff[sg_used], data_ptr, sz)) { - status = -EFAULT; + buff[sg_used] = memdup_user(data_ptr, sz); + if (IS_ERR(buff[sg_used])) { + status = PTR_ERR(buff[sg_used]); goto cleanup1; } - } else - memset(buff[sg_used], 0, sz); + } else { + buff[sg_used] = kzalloc(sz, GFP_KERNEL); + if (!buff[sg_used]) { + status = -ENOMEM; + goto cleanup1; + } + } + left -= sz; data_ptr += sz; sg_used++; @@ -6289,14 +6543,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) if (ioc->buf_size > 0) { int i; for (i = 0; i < sg_used; i++) { - temp64 = pci_map_single(h->pdev, buff[i], - buff_size[i], PCI_DMA_BIDIRECTIONAL); + temp64 = dma_map_single(&h->pdev->dev, buff[i], + buff_size[i], DMA_BIDIRECTIONAL); if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { c->SG[i].Addr = cpu_to_le64(0); c->SG[i].Len = cpu_to_le32(0); hpsa_pci_unmap(h->pdev, c, i, - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); status = -ENOMEM; goto cleanup0; } @@ -6309,7 +6563,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (sg_used) - hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); + hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); if (status) { status = -EIO; @@ -6318,10 +6572,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); - if (copy_to_user(argp, ioc, sizeof(*ioc))) { - status = -EFAULT; - goto cleanup0; - } if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { int i; @@ -6347,7 +6597,6 @@ cleanup1: kfree(buff); } kfree(buff_size); - kfree(ioc); return status; } @@ -6362,14 +6611,12 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, /* * ioctl */ -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *argp) { - struct ctlr_info *h; - void __user *argp = (void __user *)arg; + struct ctlr_info *h = sdev_to_hba(dev); int rc; - h = sdev_to_hba(dev); - switch (cmd) { case CCISS_DEREGDISK: case CCISS_REGNEWDISK: @@ -6380,25 +6627,41 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) return hpsa_getpciinfo_ioctl(h, argp); case CCISS_GETDRIVVER: return hpsa_getdrivver_ioctl(h, argp); - case CCISS_PASSTHRU: + case CCISS_PASSTHRU: { + IOCTL_Command_struct iocommand; + + if (!argp) + return -EINVAL; + if (copy_from_user(&iocommand, argp, sizeof(iocommand))) + return -EFAULT; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; - rc = hpsa_passthru_ioctl(h, argp); + rc = hpsa_passthru_ioctl(h, &iocommand); atomic_inc(&h->passthru_cmds_avail); + if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand))) + rc = -EFAULT; return rc; - case CCISS_BIG_PASSTHRU: + } + case CCISS_BIG_PASSTHRU: { + BIG_IOCTL_Command_struct ioc; + if (!argp) + return -EINVAL; + if (copy_from_user(&ioc, argp, sizeof(ioc))) + return -EFAULT; if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) return -EAGAIN; - rc = hpsa_big_passthru_ioctl(h, argp); + rc = hpsa_big_passthru_ioctl(h, &ioc); atomic_inc(&h->passthru_cmds_avail); + if (!rc && 
copy_to_user(argp, &ioc, sizeof(ioc))) + rc = -EFAULT; return rc; + } default: return -ENOTTY; } } -static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, - u8 reset_type) +static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type) { struct CommandList *c; @@ -6421,7 +6684,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type) { - int pci_dir = XFER_NONE; + enum dma_data_direction dir = DMA_NONE; c->cmd_type = CMD_IOCTL_PEND; c->scsi_cmd = SCSI_CMD_BUSY; @@ -6450,6 +6713,17 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[0] = HPSA_INQUIRY; c->Request.CDB[4] = size & 0xFF; break; + case RECEIVE_DIAGNOSTIC: + c->Request.CDBLen = 6; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); + c->Request.Timeout = 0; + c->Request.CDB[0] = cmd; + c->Request.CDB[1] = 1; + c->Request.CDB[2] = 1; + c->Request.CDB[3] = (size >> 8) & 0xFF; + c->Request.CDB[4] = size & 0xFF; + break; case HPSA_REPORT_LOG: case HPSA_REPORT_PHYS: /* Talking to controller so It's a physical command @@ -6571,7 +6845,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, default: dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); BUG(); - return -1; } } else if (cmd_type == TYPE_MSG) { switch (cmd) { @@ -6617,18 +6890,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, switch (GET_DIR(c->Request.type_attr_dir)) { case XFER_READ: - pci_dir = PCI_DMA_FROMDEVICE; + dir = DMA_FROM_DEVICE; break; case XFER_WRITE: - pci_dir = PCI_DMA_TODEVICE; + dir = DMA_TO_DEVICE; break; case XFER_NONE: - pci_dir = PCI_DMA_NONE; + dir = DMA_NONE; break; default: - pci_dir = PCI_DMA_BIDIRECTIONAL; + dir = DMA_BIDIRECTIONAL; } - if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) + if (hpsa_map_one(h->pdev, c, buff, size, dir)) return -1; return 0; } @@ -6640,7 +6913,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; - void __iomem *page_remapped = ioremap_nocache(page_base, + void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? (page_remapped + page_offs) : NULL; @@ -6824,13 +7097,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, * CCISS commands, so they must be allocated from the lower 4GiB of * memory. 
*/ - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return err; } - cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); + cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL); if (cmd == NULL) { iounmap(vaddr); return -ENOMEM; @@ -6879,7 +7152,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, return -ETIMEDOUT; } - pci_free_consistent(pdev, cmd_sz, cmd, paddr64); + dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64); if (tag & HPSA_ERROR_BIT) { dev_err(&pdev->dev, "controller message %02x:%02x failed\n", @@ -6950,8 +7223,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, static void init_driver_version(char *driver_version, int len) { - memset(driver_version, 0, len); - strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); + strscpy_pad(driver_version, HPSA " " HPSA_DRIVER_VERSION, len); } static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) @@ -6985,7 +7257,7 @@ static int controller_reset_failed(struct CfgTable __iomem *cfgtable) char *driver_ver, *old_driver_ver; int rc, size = sizeof(cfgtable->driver_version); - old_driver_ver = kmalloc(2 * size, GFP_KERNEL); + old_driver_ver = kmalloc_array(2, size, GFP_KERNEL); if (!old_driver_ver) return -ENOMEM; driver_ver = old_driver_ver + size; @@ -7184,7 +7456,6 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) dev_warn(&pdev->dev, "base address is invalid\n"); return -1; - break; } } if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) @@ -7199,12 +7470,32 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h) h->msix_vectors = 0; } +static void hpsa_setup_reply_map(struct ctlr_info *h) +{ + const struct cpumask *mask; + unsigned int queue, cpu; + + for (queue = 0; queue < h->msix_vectors; queue++) { + mask = pci_irq_get_affinity(h->pdev, queue); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + h->reply_map[cpu] = queue; + } + return; + +fallback: + for_each_possible_cpu(cpu) + h->reply_map[cpu] = 0; +} + /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. 
*/ static int hpsa_interrupt_mode(struct ctlr_info *h) { - unsigned int flags = PCI_IRQ_LEGACY; + unsigned int flags = PCI_IRQ_INTX; int ret; /* Some boards advertise MSI but don't really support it */ @@ -7232,7 +7523,8 @@ static int hpsa_interrupt_mode(struct ctlr_info *h) return 0; } -static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) +static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id, + bool *legacy_board) { int i; u32 subsystem_vendor_id, subsystem_device_id; @@ -7242,17 +7534,24 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) *board_id = ((subsystem_device_id << 16) & 0xffff0000) | subsystem_vendor_id; + if (legacy_board) + *legacy_board = false; for (i = 0; i < ARRAY_SIZE(products); i++) - if (*board_id == products[i].board_id) + if (*board_id == products[i].board_id) { + if (products[i].access != &SA5A_access && + products[i].access != &SA5B_access) + return i; + dev_warn(&pdev->dev, + "legacy board ID: 0x%08x\n", + *board_id); + if (legacy_board) + *legacy_board = true; return i; + } - if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && - subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || - !hpsa_allow_any) { - dev_warn(&pdev->dev, "unrecognized board ID: " - "0x%08x, ignoring.\n", *board_id); - return -ENODEV; - } + dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id); + if (legacy_board) + *legacy_board = true; return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ } @@ -7326,8 +7625,8 @@ static void hpsa_free_cfgtables(struct ctlr_info *h) } /* Find and map CISS config table and transfer table -+ * several items must be unmapped (freed) later -+ * */ + * several items must be unmapped (freed) later + */ static int hpsa_find_cfgtables(struct ctlr_info *h) { u64 cfg_offset; @@ -7545,7 +7844,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.txt + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); /* pci_init 1 */ pci_release_regions(h->pdev); /* pci_init 2 */ @@ -7555,13 +7854,14 @@ static void hpsa_free_pci_init(struct ctlr_info *h) static int hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; + bool legacy_board; - prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); + prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board); if (prod_index < 0) return prod_index; h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); - + h->legacy_board = legacy_board; pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); @@ -7585,6 +7885,10 @@ static int hpsa_pci_init(struct ctlr_info *h) err = hpsa_interrupt_mode(h); if (err) goto clean1; + + /* setup mapping between CPU and reply queue */ + hpsa_setup_reply_map(h); + err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto clean2; /* intmode+region, pci */ @@ -7623,7 +7927,7 @@ clean2: /* intmode+region, pci */ clean1: /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.txt + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); @@ -7710,10 +8014,10 @@ out_disable: static void hpsa_free_cmd_pool(struct ctlr_info *h) { - kfree(h->cmd_pool_bits); + bitmap_free(h->cmd_pool_bits); h->cmd_pool_bits = NULL; if (h->cmd_pool) { - pci_free_consistent(h->pdev, + dma_free_coherent(&h->pdev->dev, h->nr_cmds * 
sizeof(struct CommandList), h->cmd_pool, h->cmd_pool_dhandle); @@ -7721,7 +8025,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h) h->cmd_pool_dhandle = 0; } if (h->errinfo_pool) { - pci_free_consistent(h->pdev, + dma_free_coherent(&h->pdev->dev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); @@ -7732,15 +8036,13 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h) static int hpsa_alloc_cmd_pool(struct ctlr_info *h) { - h->cmd_pool_bits = kzalloc( - DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * - sizeof(unsigned long), GFP_KERNEL); - h->cmd_pool = pci_alloc_consistent(h->pdev, + h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL); + h->cmd_pool = dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->cmd_pool), - &(h->cmd_pool_dhandle)); - h->errinfo_pool = pci_alloc_consistent(h->pdev, + &h->cmd_pool_dhandle, GFP_KERNEL); + h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->errinfo_pool), - &(h->errinfo_pool_dhandle)); + &h->errinfo_pool_dhandle, GFP_KERNEL); if ((h->cmd_pool_bits == NULL) || (h->cmd_pool == NULL) || (h->errinfo_pool == NULL)) { @@ -7758,10 +8060,15 @@ clean_up: static void hpsa_free_irqs(struct ctlr_info *h) { int i; + int irq_vector = 0; + + if (hpsa_simple_mode) + irq_vector = h->intr_mode; if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) { /* Single reply queue, only one irq to free */ - free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]); + free_irq(pci_irq_vector(h->pdev, irq_vector), + &h->q[h->intr_mode]); h->q[h->intr_mode] = 0; return; } @@ -7780,6 +8087,10 @@ static int hpsa_request_irqs(struct ctlr_info *h, irqreturn_t (*intxhandler)(int, void *)) { int rc, i; + int irq_vector = 0; + + if (hpsa_simple_mode) + irq_vector = h->intr_mode; /* * initialize h->q[x] = x so that interrupt handlers know which @@ -7815,14 +8126,14 @@ static int hpsa_request_irqs(struct ctlr_info *h, if (h->msix_vectors > 0 || h->pdev->msi_enabled) { sprintf(h->intrname[0], "%s-msi%s", h->devname, h->msix_vectors ? 
"x" : ""); - rc = request_irq(pci_irq_vector(h->pdev, 0), + rc = request_irq(pci_irq_vector(h->pdev, irq_vector), msixhandler, 0, h->intrname[0], &h->q[h->intr_mode]); } else { sprintf(h->intrname[h->intr_mode], "%s-intx", h->devname); - rc = request_irq(pci_irq_vector(h->pdev, 0), + rc = request_irq(pci_irq_vector(h->pdev, irq_vector), intxhandler, IRQF_SHARED, h->intrname[0], &h->q[h->intr_mode]); @@ -7830,7 +8141,7 @@ static int hpsa_request_irqs(struct ctlr_info *h, } if (rc) { dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", - pci_irq_vector(h->pdev, 0), h->devname); + pci_irq_vector(h->pdev, irq_vector), h->devname); hpsa_free_irqs(h); return -ENODEV; } @@ -7840,7 +8151,7 @@ static int hpsa_request_irqs(struct ctlr_info *h, static int hpsa_kdump_soft_reset(struct ctlr_info *h) { int rc; - hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); + hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER); dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); @@ -7867,7 +8178,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h) for (i = 0; i < h->nreply_queues; i++) { if (!h->reply_queue[i].head) continue; - pci_free_consistent(h->pdev, + dma_free_coherent(&h->pdev->dev, h->reply_queue_size, h->reply_queue[i].head, h->reply_queue[i].busaddr); @@ -7896,6 +8207,11 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) destroy_workqueue(h->rescan_ctlr_wq); h->rescan_ctlr_wq = NULL; } + if (h->monitor_ctlr_wq) { + destroy_workqueue(h->monitor_ctlr_wq); + h->monitor_ctlr_wq = NULL; + } + kfree(h); /* init_one 1 */ } @@ -7953,6 +8269,10 @@ static void controller_lockup_detected(struct ctlr_info *h) spin_unlock_irqrestore(&h->lock, flags); dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", lockup_detected, h->heartbeat_sample_interval / HZ); + if (lockup_detected == 0xffff0000) { + dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n"); + writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL); + } pci_disable_device(h->pdev); fail_all_outstanding_cmds(h); } @@ -7993,9 +8313,90 @@ static int detect_controller_lockup(struct ctlr_info *h) return false; } -static void hpsa_ack_ctlr_events(struct ctlr_info *h) +/* + * Set ioaccel status for all ioaccel volumes. + * + * Called from monitor controller worker (hpsa_event_monitor_worker) + * + * A Volume (or Volumes that comprise an Array set) may be undergoing a + * transformation, so we will be turning off ioaccel for all volumes that + * make up the Array. + */ +static void hpsa_set_ioaccel_status(struct ctlr_info *h) { + int rc; int i; + u8 ioaccel_status; + unsigned char *buf; + struct hpsa_scsi_dev_t *device; + + if (!h) + return; + + buf = kmalloc(64, GFP_KERNEL); + if (!buf) + return; + + /* + * Run through current device list used during I/O requests. 
+ */ + for (i = 0; i < h->ndevices; i++) { + int offload_to_be_enabled = 0; + int offload_config = 0; + + device = h->dev[i]; + + if (!device) + continue; + if (!hpsa_vpd_page_supported(h, device->scsi3addr, + HPSA_VPD_LV_IOACCEL_STATUS)) + continue; + + memset(buf, 0, 64); + + rc = hpsa_scsi_do_inquiry(h, device->scsi3addr, + VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, + buf, 64); + if (rc != 0) + continue; + + ioaccel_status = buf[IOACCEL_STATUS_BYTE]; + + /* + * Check if offload is still configured on + */ + offload_config = + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); + /* + * If offload is configured on, check to see if ioaccel + * needs to be enabled. + */ + if (offload_config) + offload_to_be_enabled = + !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + + /* + * If ioaccel is to be re-enabled, re-enable later during the + * scan operation so the driver can get a fresh raidmap + * before turning ioaccel back on. + */ + if (offload_to_be_enabled) + continue; + + /* + * Immediately turn off ioaccel for any volume the + * controller tells us to. Some of the reasons could be: + * transformation - change to the LVs of an Array. + * degraded volume - component failure + */ + hpsa_turn_off_ioaccel_for_device(device); + } + + kfree(buf); +} + +static void hpsa_ack_ctlr_events(struct ctlr_info *h) +{ char *event_type; if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) @@ -8013,10 +8414,7 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) event_type = "configuration change"; /* Stop sending new RAID offload reqs via the IO accelerator */ scsi_block_requests(h->scsi_host); - for (i = 0; i < h->ndevices; i++) { - h->dev[i]->offload_enabled = 0; - h->dev[i]->offload_to_be_enabled = 0; - } + hpsa_set_ioaccel_status(h); hpsa_drain_accel_commands(h); /* Set 'accelerator path config change' bit */ dev_warn(&h->pdev->dev, @@ -8033,10 +8431,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) writel(h->events, &(h->cfgtable->clear_event_notify)); writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); hpsa_wait_for_clear_event_notify_ack(h); -#if 0 - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - hpsa_wait_for_mode_change_ack(h); -#endif } return; } @@ -8166,8 +8560,8 @@ static void hpsa_event_monitor_worker(struct work_struct *work) spin_lock_irqsave(&h->lock, flags); if (!h->remove_in_progress) - schedule_delayed_work(&h->event_monitor_work, - HPSA_EVENT_MONITOR_INTERVAL); + queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work, + HPSA_EVENT_MONITOR_INTERVAL); spin_unlock_irqrestore(&h->lock, flags); } @@ -8187,7 +8581,6 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work) if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { hpsa_perform_rescan(h); } else if (h->discovery_polling) { - hpsa_disable_rld_caching(h); if (hpsa_luns_changed(h)) { dev_info(&h->pdev->dev, "driver discovery polling rescan.\n"); @@ -8213,7 +8606,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work) spin_lock_irqsave(&h->lock, flags); if (!h->remove_in_progress) - schedule_delayed_work(&h->monitor_ctlr_work, + queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work, h->heartbeat_sample_interval); spin_unlock_irqrestore(&h->lock, flags); } @@ -8230,9 +8623,31 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, return wq; } +static void hpda_free_ctlr_info(struct ctlr_info *h) +{ + kfree(h->reply_map); + kfree(h); +} + +static struct ctlr_info *hpda_alloc_ctlr_info(void) +{ + struct ctlr_info *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + 
return NULL; + + h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL); + if (!h->reply_map) { + kfree(h); + return NULL; + } + return h; +} + static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int dac, rc; + int rc; struct ctlr_info *h; int try_soft_reset = 0; unsigned long flags; @@ -8241,7 +8656,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); - rc = hpsa_lookup_board_id(pdev, &board_id); + rc = hpsa_lookup_board_id(pdev, &board_id, NULL); if (rc < 0) { dev_warn(&pdev->dev, "Board ID not found\n"); return rc; @@ -8267,7 +8682,7 @@ reinit_after_soft_reset: * the driver. See comments in hpsa.h for more info. */ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); - h = kzalloc(sizeof(*h), GFP_KERNEL); + h = hpda_alloc_ctlr_info(); if (!h) { dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; @@ -8307,14 +8722,10 @@ reinit_after_soft_reset: number_of_controllers++; /* configure PCI DMA stuff */ - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc == 0) { - dac = 1; - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc == 0) { - dac = 0; - } else { + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (rc != 0) { + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (rc != 0) { dev_err(&pdev->dev, "no suitable DMA available\n"); goto clean3; /* shost, pci, lu, aer/h */ } @@ -8359,6 +8770,12 @@ reinit_after_soft_reset: goto clean7; /* aer/h */ } + h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor"); + if (!h->monitor_ctlr_wq) { + rc = -ENOMEM; + goto clean7; + } + /* * At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try @@ -8445,7 +8862,7 @@ reinit_after_soft_reset: /* hook into SCSI subsystem */ rc = hpsa_scsi_add_host(h); if (rc) - goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; @@ -8460,6 +8877,8 @@ reinit_after_soft_reset: HPSA_EVENT_MONITOR_INTERVAL); return 0; +clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + kfree(h->lastlogicals); clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ hpsa_free_performant_mode(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); @@ -8488,7 +8907,11 @@ clean1: /* wq/aer/h */ destroy_workqueue(h->rescan_ctlr_wq); h->rescan_ctlr_wq = NULL; } - kfree(h); + if (h->monitor_ctlr_wq) { + destroy_workqueue(h->monitor_ctlr_wq); + h->monitor_ctlr_wq = NULL; + } + hpda_free_ctlr_info(h); return rc; } @@ -8510,8 +8933,8 @@ static void hpsa_flush_cache(struct ctlr_info *h) RAID_CTLR_LUNID, TYPE_CMD)) { goto out; } - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, + DEFAULT_TIMEOUT); if (rc) goto out; if (c->err_info->CommandStatus != 0) @@ -8546,8 +8969,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) RAID_CTLR_LUNID, TYPE_CMD)) goto errout; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; @@ -8558,8 +8981,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) 
RAID_CTLR_LUNID, TYPE_CMD)) goto errout; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_TODEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE, + NO_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; @@ -8568,8 +8991,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) RAID_CTLR_LUNID, TYPE_CMD)) goto errout; - rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, - PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE, + NO_TIMEOUT); if ((rc != 0) || (c->err_info->CommandStatus != 0)) goto errout; @@ -8584,7 +9007,7 @@ out: kfree(options); } -static void hpsa_shutdown(struct pci_dev *pdev) +static void __hpsa_shutdown(struct pci_dev *pdev) { struct ctlr_info *h; @@ -8599,6 +9022,12 @@ static void hpsa_shutdown(struct pci_dev *pdev) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } +static void hpsa_shutdown(struct pci_dev *pdev) +{ + __hpsa_shutdown(pdev); + pci_disable_device(pdev); +} + static void hpsa_free_device_info(struct ctlr_info *h) { int i; @@ -8629,6 +9058,9 @@ static void hpsa_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&h->event_monitor_work); destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); + destroy_workqueue(h->monitor_ctlr_wq); + + hpsa_delete_sas_host(h); /* * Call before disabling interrupts. @@ -8640,7 +9072,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) scsi_remove_host(h->scsi_host); /* init_one 8 */ /* includes hpsa_free_irqs - init_one 4 */ /* includes hpsa_disable_interrupt_mode - pci_init 2 */ - hpsa_shutdown(pdev); + __hpsa_shutdown(pdev); hpsa_free_device_info(h); /* scan */ @@ -8662,32 +9094,31 @@ static void hpsa_remove_one(struct pci_dev *pdev) free_percpu(h->lockup_detected); /* init_one 2 */ h->lockup_detected = NULL; /* init_one 2 */ - /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ - - hpsa_delete_sas_host(h); - kfree(h); /* init_one 1 */ + hpda_free_ctlr_info(h); /* init_one 1 */ } -static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, - __attribute__((unused)) pm_message_t state) +static int __maybe_unused hpsa_suspend( + __attribute__((unused)) struct device *dev) { return -ENOSYS; } -static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) +static int __maybe_unused hpsa_resume + (__attribute__((unused)) struct device *dev) { return -ENOSYS; } +static SIMPLE_DEV_PM_OPS(hpsa_pm_ops, hpsa_suspend, hpsa_resume); + static struct pci_driver hpsa_pci_driver = { .name = HPSA, .probe = hpsa_init_one, .remove = hpsa_remove_one, .id_table = hpsa_pci_device_id, /* id_table */ .shutdown = hpsa_shutdown, - .suspend = hpsa_suspend, - .resume = hpsa_resume, + .driver.pm = &hpsa_pm_ops, }; /* Fill in bucket_map[], given nsgs (the max number of @@ -8876,10 +9307,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) } else if (trans_support & CFGTBL_Trans_io_accel2) { u64 cfg_offset, cfg_base_addr_index; u32 bft2_offset, cfg_base_addr; - int rc; - rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, - &cfg_base_addr_index, &cfg_offset); + hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, + &cfg_base_addr_index, &cfg_offset); BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, @@ -8909,10 +9339,10 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) static void 
hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) { if (h->ioaccel_cmd_pool) { - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), - h->ioaccel_cmd_pool, - h->ioaccel_cmd_pool_dhandle); + dma_free_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + h->ioaccel_cmd_pool, + h->ioaccel_cmd_pool_dhandle); h->ioaccel_cmd_pool = NULL; h->ioaccel_cmd_pool_dhandle = 0; } @@ -8935,9 +9365,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % IOACCEL1_COMMANDLIST_ALIGNMENT); h->ioaccel_cmd_pool = - pci_alloc_consistent(h->pdev, + dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), - &(h->ioaccel_cmd_pool_dhandle)); + &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL); h->ioaccel1_blockFetchTable = kmalloc(((h->ioaccel_maxsg + 1) * @@ -8962,10 +9392,10 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) hpsa_free_ioaccel2_sg_chain_blocks(h); if (h->ioaccel2_cmd_pool) { - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), - h->ioaccel2_cmd_pool, - h->ioaccel2_cmd_pool_dhandle); + dma_free_coherent(&h->pdev->dev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + h->ioaccel2_cmd_pool, + h->ioaccel2_cmd_pool_dhandle); h->ioaccel2_cmd_pool = NULL; h->ioaccel2_cmd_pool_dhandle = 0; } @@ -8988,9 +9418,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % IOACCEL2_COMMANDLIST_ALIGNMENT); h->ioaccel2_cmd_pool = - pci_alloc_consistent(h->pdev, + dma_alloc_coherent(&h->pdev->dev, h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), - &(h->ioaccel2_cmd_pool_dhandle)); + &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL); h->ioaccel2_blockFetchTable = kmalloc(((h->ioaccel_maxsg + 1) * @@ -9031,8 +9461,6 @@ static void hpsa_free_performant_mode(struct ctlr_info *h) static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; - unsigned long transMethod = CFGTBL_Trans_Performant | - CFGTBL_Trans_use_short_tags; int i, rc; if (hpsa_simple_mode) @@ -9044,14 +9472,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) /* Check for I/O accelerator mode support */ if (trans_support & CFGTBL_Trans_io_accel1) { - transMethod |= CFGTBL_Trans_io_accel1 | - CFGTBL_Trans_enable_directed_msix; rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); if (rc) return rc; } else if (trans_support & CFGTBL_Trans_io_accel2) { - transMethod |= CFGTBL_Trans_io_accel2 | - CFGTBL_Trans_enable_directed_msix; rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); if (rc) return rc; @@ -9063,9 +9487,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) h->reply_queue_size = h->max_commands * sizeof(u64); for (i = 0; i < h->nreply_queues; i++) { - h->reply_queue[i].head = pci_alloc_consistent(h->pdev, + h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev, h->reply_queue_size, - &(h->reply_queue[i].busaddr)); + &h->reply_queue[i].busaddr, + GFP_KERNEL); if (!h->reply_queue[i].head) { rc = -ENOMEM; goto clean1; /* rq, ioaccel */ @@ -9153,9 +9578,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) struct sas_phy *phy = hpsa_sas_phy->phy; sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (hpsa_sas_phy->added_to_port) list_del(&hpsa_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(hpsa_sas_phy); } @@ -9313,7 +9738,7 @@ static int hpsa_add_sas_host(struct ctlr_info *h) struct hpsa_sas_port *hpsa_sas_port; struct hpsa_sas_phy *hpsa_sas_phy; - parent_dev = 
&h->scsi_host->shost_gendev; + parent_dev = &h->scsi_host->shost_dev; hpsa_sas_node = hpsa_alloc_sas_node(parent_dev); if (!hpsa_sas_node) @@ -9340,7 +9765,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h) return 0; free_sas_phy: - hpsa_free_sas_phy(hpsa_sas_phy); + sas_phy_free(hpsa_sas_phy->phy); + kfree(hpsa_sas_phy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); free_sas_node: @@ -9376,10 +9802,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); if (rc) - goto free_sas_port; + goto free_sas_rphy; return 0; +free_sas_rphy: + sas_rphy_free(rphy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); device->sas_port = NULL; @@ -9404,7 +9832,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy) static int hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { - *identifier = 0; + struct Scsi_Host *shost = phy_to_shost(rphy); + struct ctlr_info *h; + struct hpsa_scsi_dev_t *sd; + + if (!shost) + return -ENXIO; + + h = shost_to_hba(shost); + + if (!h) + return -ENXIO; + + sd = hpsa_find_device_by_sas_rphy(h, rphy); + if (!sd) + return -ENXIO; + + *identifier = sd->eli; + return 0; } @@ -9443,14 +9888,6 @@ hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) return -EINVAL; } -/* SMP = Serial Management Protocol */ -static int -hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, -struct request *req) -{ - return -EINVAL; -} - static struct sas_function_template hpsa_sas_transport_functions = { .get_linkerrors = hpsa_sas_get_linkerrors, .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier, @@ -9460,7 +9897,6 @@ static struct sas_function_template hpsa_sas_transport_functions = { .phy_setup = hpsa_sas_phy_setup, .phy_release = hpsa_sas_phy_release, .set_phy_speed = hpsa_sas_phy_speed, - .smp_handler = hpsa_sas_smp_handler, }; /* |
