Diffstat (limited to 'drivers/scsi/smartpqi/smartpqi_init.c')
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_init.c	6183
1 file changed, 4346 insertions(+), 1837 deletions(-)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index f564af8949e8..fe549e2b7c94 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016-2017 Microsemi Corporation
+ * driver for Microchip PQI-based storage controllers
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ * Questions/Comments/Bugfixes to storagedev@microchip.com
*
*/
@@ -26,13 +19,14 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
+#include <linux/crash_dump.h>
+#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"
@@ -40,28 +34,43 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.4-070"
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 4
-#define DRIVER_REVISION 70
+#define DRIVER_VERSION "2.1.36-026"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_RELEASE 36
+#define DRIVER_REVISION 26
-#define DRIVER_NAME "Microsemi PQI Driver (v" \
+#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"
#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
-MODULE_AUTHOR("Microsemi");
-MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+#define PQI_POST_RESET_DELAY_SECS 5
+#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
+
+#define PQI_NO_COMPLETION ((void *)-1)
+
+MODULE_AUTHOR("Microchip");
+MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
DRIVER_VERSION);
-MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+struct pqi_cmd_priv {
+ int this_residual;
+};
+
+static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static void pqi_verify_structures(void);
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
@@ -69,20 +78,29 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+ struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass);
+ struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd);
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
- u32 bytes_requested);
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_secs);
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
+static void pqi_tmf_worker(struct work_struct *work);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -152,14 +170,36 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
"\t\tSupported: none, reboot, panic\n"
"\t\tDefault: none");
+static int pqi_expose_ld_first;
+module_param_named(expose_ld_first,
+ pqi_expose_ld_first, int, 0644);
+MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
+
+static int pqi_hide_vsep;
+module_param_named(hide_vsep,
+ pqi_hide_vsep, int, 0644);
+MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
+
+static int pqi_disable_managed_interrupts;
+module_param_named(disable_managed_interrupts,
+ pqi_disable_managed_interrupts, int, 0644);
+MODULE_PARM_DESC(disable_managed_interrupts,
+ "Disable the kernel automatically assigning SMP affinity to IRQs.");
+
+static unsigned int pqi_ctrl_ready_timeout_secs;
+module_param_named(ctrl_ready_timeout,
+ pqi_ctrl_ready_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(ctrl_ready_timeout,
+ "Timeout in seconds for driver to wait for controller ready.");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
"RAID-1(1+0)",
"RAID-5",
"RAID-5+1",
- "RAID-ADG",
- "RAID-1(ADM)",
+ "RAID-6",
+ "RAID-1(Triple)",
};
static char *pqi_raid_level_to_string(u8 raid_level)
@@ -176,14 +216,14 @@ static char *pqi_raid_level_to_string(u8 raid_level)
#define SA_RAID_5 3 /* also used for RAID 50 */
#define SA_RAID_51 4
#define SA_RAID_6 5 /* also used for RAID 60 */
-#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
-#define SA_RAID_MAX SA_RAID_ADM
+#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
+#define SA_RAID_MAX SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN 0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
pqi_prep_for_scsi_done(scmd);
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
static inline void pqi_disable_write_same(struct scsi_device *sdev)
@@ -206,11 +246,16 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
return scsi3addr[2] != 0;
}
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
if (!sis_is_firmware_running(ctrl_info))
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}
static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
@@ -218,66 +263,165 @@ static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
-static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
- struct pqi_ctrl_info *ctrl_info)
+#define PQI_DRIVER_SCRATCH_PQI_MODE 0x1
+#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2
+
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
- return sis_read_driver_scratch(ctrl_info);
+ return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}
static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_mode mode)
{
- sis_write_driver_scratch(ctrl_info, mode);
+ u32 driver_scratch;
+
+ driver_scratch = sis_read_driver_scratch(ctrl_info);
+
+ if (mode == PQI_MODE)
+ driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
+ else
+ driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
+
+ sis_write_driver_scratch(ctrl_info, driver_scratch);
+}
+
+static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
+{
+ return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
+}
+
+static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
+{
+ u32 driver_scratch;
+
+ driver_scratch = sis_read_driver_scratch(ctrl_info);
+
+ if (is_supported)
+ driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
+ else
+ driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
+
+ sis_write_driver_scratch(ctrl_info, driver_scratch);
+}
+
+static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->scan_blocked = true;
+ mutex_lock(&ctrl_info->scan_mutex);
+}
+
+static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->scan_blocked = false;
+ mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->scan_blocked;
+}
+
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
+static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ struct Scsi_Host *shost;
+ unsigned int num_loops;
+ int msecs_sleep;
+
+ shost = ctrl_info->scsi_host;
+
+ scsi_block_requests(shost);
+
+ num_loops = 0;
+ msecs_sleep = 20;
+ while (scsi_host_busy(shost)) {
+ num_loops++;
+ if (num_loops == 10)
+ msecs_sleep = 500;
+ msleep(msecs_sleep);
+ }
+}
+
+static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ scsi_unblock_requests(ctrl_info->scsi_host);
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_dec(&ctrl_info->num_busy_threads);
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
}
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
- scsi_block_requests(ctrl_info->scsi_host);
}
static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = false;
wake_up_all(&ctrl_info->block_requests_wait);
- pqi_retry_raid_bypass_requests(ctrl_info);
- scsi_unblock_requests(ctrl_info->scsi_host);
}
-static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
- unsigned long timeout_msecs)
+static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
- unsigned long remaining_msecs;
-
if (!pqi_ctrl_blocked(ctrl_info))
- return timeout_msecs;
+ return;
atomic_inc(&ctrl_info->num_blocked_threads);
-
- if (timeout_msecs == NO_TIMEOUT) {
- wait_event(ctrl_info->block_requests_wait,
- !pqi_ctrl_blocked(ctrl_info));
- remaining_msecs = timeout_msecs;
- } else {
- unsigned long remaining_jiffies;
-
- remaining_jiffies =
- wait_event_timeout(ctrl_info->block_requests_wait,
- !pqi_ctrl_blocked(ctrl_info),
- msecs_to_jiffies(timeout_msecs));
- remaining_msecs = jiffies_to_msecs(remaining_jiffies);
- }
-
+ wait_event(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info));
atomic_dec(&ctrl_info->num_blocked_threads);
-
- return remaining_msecs;
}
+#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
+
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ bool displayed_warning;
+
+ displayed_warning = false;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
+
while (atomic_read(&ctrl_info->num_busy_threads) >
- atomic_read(&ctrl_info->num_blocked_threads))
+ atomic_read(&ctrl_info->num_blocked_threads)) {
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "waiting %u seconds for driver activity to quiesce\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+ displayed_warning = true;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
+ }
usleep_range(1000, 2000);
+ }
+
+ if (displayed_warning)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "driver activity quiesced after waiting for %u seconds\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
@@ -285,54 +429,73 @@ static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
return device->device_offline;
}
-static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
- device->in_reset = true;
+ mutex_lock(&ctrl_info->ofa_mutex);
}
-static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
- device->in_reset = false;
+ mutex_unlock(&ctrl_info->ofa_mutex);
}
-static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
- return device->in_reset;
+ mutex_lock(&ctrl_info->ofa_mutex);
+ mutex_unlock(&ctrl_info->ofa_mutex);
}
-static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->in_ofa = true;
+ return mutex_is_locked(&ctrl_info->ofa_mutex);
}
-static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
- ctrl_info->in_ofa = false;
+ device->in_remove = true;
}
-static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
- return ctrl_info->in_ofa;
+ return device->in_remove;
}
-static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
+static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
- device->in_remove = true;
+ device->in_reset[lun] = true;
}
-static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
- return device->in_remove && !ctrl_info->in_shutdown;
+ device->in_reset[lun] = false;
}
-static inline void pqi_schedule_rescan_worker_with_delay(
- struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
+{
+ return device->in_reset[lun];
+}
+
+static inline int pqi_event_type_to_event_index(unsigned int event_type)
+{
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+ if (event_type == pqi_supported_event_types[index])
+ return index;
+
+ return -1;
+}
+
+static inline bool pqi_is_supported_event(unsigned int event_type)
+{
+ return pqi_event_type_to_event_index(event_type) != -1;
+}
+
+static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
+ unsigned long delay)
{
if (pqi_ctrl_offline(ctrl_info))
return;
- if (pqi_ctrl_in_ofa(ctrl_info))
- return;
schedule_delayed_work(&ctrl_info->rescan_work, delay);
}
@@ -342,10 +505,9 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}
-#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
+#define PQI_RESCAN_WORK_DELAY (10 * HZ)
-static inline void pqi_schedule_rescan_worker_delayed(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}
@@ -365,25 +527,48 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
- if (!ctrl_info->soft_reset_status)
- return 0;
-
return readb(ctrl_info->soft_reset_status);
}
-static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
u8 status;
- if (!ctrl_info->soft_reset_status)
- return;
-
status = pqi_read_soft_reset_status(ctrl_info);
- status &= ~clear;
+ status &= ~PQI_SOFT_RESET_ABORT;
writeb(status, ctrl_info->soft_reset_status);
}
+static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
+ }
+ }
+
+ return io_high_prio;
+}
+
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
size_t buffer_length, enum dma_data_direction data_direction)
@@ -456,10 +641,14 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
case CISS_REPORT_PHYS:
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
- if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_EXTENDED;
- else
- cdb[1] = CISS_REPORT_LOG_EXTENDED;
+ if (cmd == CISS_REPORT_PHYS) {
+ if (ctrl_info->rpl_extended_format_4_5_supported)
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
+ else
+ cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
+ } else {
+ cdb[1] = ctrl_info->ciss_report_log_flags;
+ }
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -469,6 +658,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case SA_FLUSH_CACHE:
+ request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
cdb[6] = BMIC_FLUSH_CACHE;
@@ -476,9 +666,11 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
break;
case BMIC_SENSE_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+ case BMIC_SENSE_SUBSYSTEM_INFORMATION:
+ case BMIC_SENSE_FEATURE:
request->data_direction = SOP_READ_FLAG;
cdb[0] = BMIC_READ;
cdb[6] = cmd;
@@ -486,7 +678,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
break;
case BMIC_SET_DIAG_OPTIONS:
cdb_length = 0;
- /* fall through */
+ fallthrough;
case BMIC_WRITE_HOST_WELLNESS:
request->data_direction = SOP_WRITE_FLAG;
cdb[0] = BMIC_WRITE;
@@ -501,8 +693,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be16(cdb_length, &cdb[7]);
break;
default:
- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
- cmd);
+ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
break;
}
@@ -533,24 +724,38 @@ static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
io_request->raid_bypass = false;
}
-static struct pqi_io_request *pqi_alloc_io_request(
- struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
struct pqi_io_request *io_request;
- u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
+ u16 i;
- while (1) {
+ if (scmd) { /* SML I/O request */
+ u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+ i = blk_mq_unique_tag_to_tag(blk_tag);
io_request = &ctrl_info->io_request_pool[i];
- if (atomic_inc_return(&io_request->refcount) == 1)
- break;
- atomic_dec(&io_request->refcount);
- i = (i + 1) % ctrl_info->max_io_slots;
+ if (atomic_inc_return(&io_request->refcount) > 1) {
+ atomic_dec(&io_request->refcount);
+ return NULL;
+ }
+ } else { /* IOCTL or driver internal request */
+ /*
+ * benignly racy - may have to wait for an open slot.
+ * command slot range is scsi_ml_can_queue -
+ * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
+ */
+ i = 0;
+ while (1) {
+ io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
+ if (atomic_inc_return(&io_request->refcount) == 1)
+ break;
+ atomic_dec(&io_request->refcount);
+ i = (i + 1) % PQI_RESERVED_IO_SLOTS;
+ }
}
- /* benignly racy */
- ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
-
- pqi_reinit_io_request(io_request);
+ if (io_request)
+ pqi_reinit_io_request(io_request);
return io_request;
}
@@ -561,156 +766,184 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info,
- unsigned long timeout_msecs)
+ u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+ struct pqi_raid_error_info *error_info)
{
int rc;
- enum dma_data_direction dir;
struct pqi_raid_path_request request;
+ enum dma_data_direction dir;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, scsi3addr, buffer,
- buffer_length, vpd_page, &dir);
+ rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
+ buffer, buffer_length, vpd_page, &dir);
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
-/* Helper functions for pqi_send_scsi_raid_request */
+/* helper functions for pqi_send_scsi_raid_request */
static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length)
+ u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length,
- struct pqi_raid_error_info *error_info)
+ u8 cmd, void *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info);
}
-
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
- struct bmic_identify_controller *buffer)
+ struct bmic_identify_controller *buffer)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
- buffer, sizeof(*buffer));
+ buffer, sizeof(*buffer));
+}
+
+static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
+ struct bmic_sense_subsystem_info *sense_info)
+{
+ return pqi_send_ctrl_raid_request(ctrl_info,
+ BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+ sizeof(*sense_info));
}
static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
- buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+ buffer, buffer_length, vpd_page, NULL);
}
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u16 vpd_page)
+static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
int rc;
- int i;
- int pages;
- unsigned char *buf, bufsize;
+ enum dma_data_direction dir;
+ u16 bmic_device_index;
+ struct pqi_raid_path_request request;
- buf = kzalloc(256, GFP_KERNEL);
- if (!buf)
- return false;
+ rc = pqi_build_raid_path_request(ctrl_info, &request,
+ BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
+ buffer_length, 0, &dir);
+ if (rc)
+ return rc;
- /* Get the size of the page list first */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, SCSI_VPD_HEADER_SZ);
- if (rc != 0)
- goto exit_unsupported;
+ bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
+ request.cdb[2] = (u8)bmic_device_index;
+ request.cdb[9] = (u8)(bmic_device_index >> 8);
- pages = buf[3];
- if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
- bufsize = pages + SCSI_VPD_HEADER_SZ;
- else
- bufsize = 255;
-
- /* Get the whole VPD page list */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, bufsize);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- for (i = 1; i <= pages; i++)
- if (buf[3 + i] == vpd_page)
- goto exit_supported;
-
-exit_unsupported:
- kfree(buf);
- return false;
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
-exit_supported:
- kfree(buf);
- return true;
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
+ return rc;
}
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u8 *device_id, int buflen)
+static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
- int rc;
- unsigned char *buf;
+ u32 bytes;
- if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
- return 1; /* function not supported */
+ bytes = get_unaligned_le16(limit);
+ if (bytes == 0)
+ bytes = ~0;
+ else
+ bytes *= 1024;
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ return bytes;
+}
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_DEVICE_ID,
- buf, 64);
- if (rc == 0) {
- if (buflen > 16)
- buflen = 16;
- memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
- }
+#pragma pack(1)
- kfree(buf);
+struct bmic_sense_feature_buffer {
+ struct bmic_sense_feature_buffer_header header;
+ struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
+};
- return rc;
-}
+#pragma pack()
-static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device,
- struct bmic_identify_physical_device *buffer,
- size_t buffer_length)
+#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
+ offsetofend(struct bmic_sense_feature_buffer, \
+ aio_subpage.max_write_raid_1_10_3drive)
+
+#define MINIMUM_AIO_SUBPAGE_LENGTH \
+ (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
+ max_write_raid_1_10_3drive) - \
+ sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
+
+static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
int rc;
enum dma_data_direction dir;
- u16 bmic_device_index;
struct pqi_raid_path_request request;
+ struct bmic_sense_feature_buffer *buffer;
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &dir);
+ buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
+ buffer, sizeof(*buffer), 0, &dir);
if (rc)
- return rc;
+ goto error;
- bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
- request.cdb[2] = (u8)bmic_device_index;
- request.cdb[9] = (u8)(bmic_device_index >> 8);
+ request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
+ request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
+ if (rc)
+ goto error;
+
+ if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
+ buffer->header.subpage_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+ get_unaligned_le16(&buffer->header.buffer_length) <
+ MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
+ buffer->aio_subpage.header.page_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE ||
+ buffer->aio_subpage.header.subpage_code !=
+ BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
+ get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
+ MINIMUM_AIO_SUBPAGE_LENGTH) {
+ goto error;
+ }
+
+ ctrl_info->max_transfer_encrypted_sas_sata =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
+
+ ctrl_info->max_transfer_encrypted_nvme =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_transfer_encrypted_nvme);
+
+ ctrl_info->max_write_raid_5_6 =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_5_6);
+
+ ctrl_info->max_write_raid_1_10_2drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_2drive);
+
+ ctrl_info->max_write_raid_1_10_3drive =
+ pqi_aio_limit_to_bytes(
+ &buffer->aio_subpage.max_write_raid_1_10_3drive);
+
+error:
+ kfree(buffer);
+
return rc;
}
@@ -720,13 +953,6 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
int rc;
struct bmic_flush_cache *flush_cache;
- /*
- * Don't bother trying to flush the cache if the controller is
- * locked up.
- */
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
-
flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
if (!flush_cache)
return -ENOMEM;
@@ -749,7 +975,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, error_info);
}
-#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+#define PQI_FETCH_PTRAID_DATA (1 << 31)
static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
@@ -761,14 +987,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
return -ENOMEM;
rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ diag, sizeof(*diag));
if (rc)
goto out;
diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
- rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+ sizeof(*diag));
+
out:
kfree(diag);
@@ -779,7 +1006,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
- buffer, buffer_length);
+ buffer, buffer_length);
}
#pragma pack(1)
@@ -816,9 +1043,8 @@ static int pqi_write_driver_version_to_host_wellness(
buffer->driver_version_tag[1] = 'V';
put_unaligned_le16(sizeof(buffer->driver_version),
&buffer->driver_version_length);
- strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
- sizeof(buffer->driver_version) - 1);
- buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+ strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
+ sizeof(buffer->driver_version));
buffer->dont_write_tag[0] = 'D';
buffer->dont_write_tag[1] = 'W';
buffer->end_tag[0] = 'Z';
@@ -894,7 +1120,7 @@ static int pqi_write_current_time_to_host_wellness(
return rc;
}
-#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
static void pqi_update_time_worker(struct work_struct *work)
{
@@ -904,9 +1130,6 @@ static void pqi_update_time_worker(struct work_struct *work)
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
update_time_work);
- if (pqi_ctrl_offline(ctrl_info))
- return;
-
rc = pqi_write_current_time_to_host_wellness(ctrl_info);
if (rc)
dev_warn(&ctrl_info->pci_dev->dev,
@@ -916,27 +1139,23 @@ static void pqi_update_time_worker(struct work_struct *work)
PQI_UPDATE_TIME_WORK_INTERVAL);
}
-static inline void pqi_schedule_update_time_worker(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
-static inline void pqi_cancel_update_time_worker(
- struct pqi_ctrl_info *ctrl_info)
+static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
-static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- void *buffer, size_t buffer_length)
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
+ size_t buffer_length)
{
- return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}
-static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- void **buffer)
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
int rc;
size_t lun_list_length;
@@ -951,8 +1170,7 @@ static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
goto out;
}
- rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
- sizeof(*report_lun_header));
+ rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
if (rc)
goto out;
@@ -976,8 +1194,8 @@ again:
if (rc)
goto out;
- new_lun_list_length = get_unaligned_be32(
- &((struct report_lun_header *)lun_data)->list_length);
+ new_lun_list_length =
+ get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
if (new_lun_list_length > lun_list_length) {
lun_list_length = new_lun_list_length;
@@ -998,28 +1216,81 @@ out:
return rc;
}
-static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
- void **buffer)
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
- return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
- buffer);
+ int rc;
+ unsigned int i;
+ u8 rpl_response_format;
+ u32 num_physicals;
+ void *rpl_list;
+ struct report_lun_header *rpl_header;
+ struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
+ struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
+
+ rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
+ if (rc)
+ return rc;
+
+ if (ctrl_info->rpl_extended_format_4_5_supported) {
+ rpl_header = rpl_list;
+ rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
+ if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
+ *buffer = rpl_list;
+ return 0;
+ } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "RPL returned unsupported data format %u\n",
+ rpl_response_format);
+ return -EINVAL;
+ } else {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "RPL returned extended format 2 instead of 4\n");
+ }
+ }
+
+ rpl_8byte_wwid_list = rpl_list;
+ num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
+
+ rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
+ num_physicals), GFP_KERNEL);
+ if (!rpl_16byte_wwid_list)
+ return -ENOMEM;
+
+ put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
+ &rpl_16byte_wwid_list->header.list_length);
+ rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
+
+ for (i = 0; i < num_physicals; i++) {
+ memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
+ memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
+ rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
+ rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
+ rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
+ rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
+ rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
+ }
+
+ kfree(rpl_8byte_wwid_list);
+ *buffer = rpl_16byte_wwid_list;
+
+ return 0;
}
-static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
- void **buffer)
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
- struct report_phys_lun_extended **physdev_list,
- struct report_log_lun_extended **logdev_list)
+ struct report_phys_lun_16byte_wwid_list **physdev_list,
+ struct report_log_lun_list **logdev_list)
{
int rc;
size_t logdev_list_length;
size_t logdev_data_length;
- struct report_log_lun_extended *internal_logdev_list;
- struct report_log_lun_extended *logdev_data;
+ struct report_log_lun_list *internal_logdev_list;
+ struct report_log_lun_list *logdev_data;
struct report_lun_header report_lun_header;
rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
@@ -1033,7 +1304,8 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
"report logical LUNs failed\n");
/*
- * Tack the controller itself onto the end of the logical device list.
+ * Tack the controller itself onto the end of the logical device list
+ * by adding a list entry that is all zeros.
*/
logdev_data = *logdev_list;
@@ -1044,7 +1316,7 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
} else {
memset(&report_lun_header, 0, sizeof(report_lun_header));
logdev_data =
- (struct report_log_lun_extended *)&report_lun_header;
+ (struct report_log_lun_list *)&report_lun_header;
logdev_list_length = 0;
}
@@ -1052,7 +1324,7 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
logdev_list_length;
internal_logdev_list = kmalloc(logdev_data_length +
- sizeof(struct report_log_lun_extended), GFP_KERNEL);
+ sizeof(struct report_log_lun), GFP_KERNEL);
if (!internal_logdev_list) {
kfree(*logdev_list);
*logdev_list = NULL;
@@ -1061,9 +1333,9 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
memcpy(internal_logdev_list, logdev_data, logdev_data_length);
memset((u8 *)internal_logdev_list + logdev_data_length, 0,
- sizeof(struct report_log_lun_extended_entry));
+ sizeof(struct report_log_lun));
put_unaligned_be32(logdev_list_length +
- sizeof(struct report_log_lun_extended_entry),
+ sizeof(struct report_log_lun),
&internal_logdev_list->header.list_length);
kfree(*logdev_list);
@@ -1163,9 +1435,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
err_msg = "invalid RAID-1 map";
goto bad_raid_map;
}
- } else if (device->raid_level == SA_RAID_ADM) {
+ } else if (device->raid_level == SA_RAID_TRIPLE) {
if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
- err_msg = "invalid RAID-1(ADM) map";
+ err_msg = "invalid RAID-1(Triple) map";
goto bad_raid_map;
}
} else if ((device->raid_level == SA_RAID_5 ||
@@ -1204,9 +1476,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
return -ENOMEM;
rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
- device->scsi3addr, raid_map, sizeof(*raid_map),
- 0, NULL, NO_TIMEOUT);
-
+ device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
if (rc)
goto error;
@@ -1221,17 +1491,17 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
return -ENOMEM;
rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
- device->scsi3addr, raid_map, raid_map_size,
- 0, NULL, NO_TIMEOUT);
+ device->scsi3addr, raid_map, raid_map_size, 0, NULL);
if (rc)
goto error;
if (get_unaligned_le32(&raid_map->structure_size)
!= raid_map_size) {
dev_warn(&ctrl_info->pci_dev->dev,
- "Requested %d bytes, received %d bytes",
+ "requested %u bytes, received %u bytes\n",
raid_map_size,
get_unaligned_le32(&raid_map->structure_size));
+ rc = -EINVAL;
goto error;
}
}
@@ -1240,6 +1510,12 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto error;
+ device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
+ if (!device->raid_io_stats) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
device->raid_map = raid_map;
return 0;
@@ -1250,6 +1526,39 @@ error:
return rc;
}
+static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ if (!ctrl_info->lv_drive_type_mix_valid) {
+ device->max_transfer_encrypted = ~0;
+ return;
+ }
+
+ switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
+ case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
+ case LV_DRIVE_TYPE_MIX_SAS_ONLY:
+ case LV_DRIVE_TYPE_MIX_SATA_ONLY:
+ device->max_transfer_encrypted =
+ ctrl_info->max_transfer_encrypted_sas_sata;
+ break;
+ case LV_DRIVE_TYPE_MIX_NVME_ONLY:
+ device->max_transfer_encrypted =
+ ctrl_info->max_transfer_encrypted_nvme;
+ break;
+ case LV_DRIVE_TYPE_MIX_UNKNOWN:
+ case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
+ default:
+ device->max_transfer_encrypted =
+ min(ctrl_info->max_transfer_encrypted_sas_sata,
+ ctrl_info->max_transfer_encrypted_nvme);
+ break;
+ }
+}
+
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
@@ -1266,17 +1575,21 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
-#define RAID_BYPASS_STATUS 4
-#define RAID_BYPASS_CONFIGURED 0x1
-#define RAID_BYPASS_ENABLED 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
bypass_status = buffer[RAID_BYPASS_STATUS];
device->raid_bypass_configured =
(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
if (device->raid_bypass_configured &&
(bypass_status & RAID_BYPASS_ENABLED) &&
- pqi_get_raid_map(ctrl_info, device) == 0)
+ pqi_get_raid_map(ctrl_info, device) == 0) {
device->raid_bypass_enabled = true;
+ if (get_unaligned_le16(&device->raid_map->flags) &
+ RAID_MAP_ENCRYPTION_ENABLED)
+ pqi_set_max_transfer_encrypted(ctrl_info, device);
+ }
out:
kfree(buffer);
@@ -1324,33 +1637,76 @@ no_buffer:
device->volume_offline = volume_offline;
}
-#define PQI_INQUIRY_PAGE0_RETRIES 3
+#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
+#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
+#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+
+ memset(id_phys, 0, sizeof(*id_phys));
+
+ rc = pqi_identify_physical_device(ctrl_info, device,
+ id_phys, sizeof(*id_phys));
+ if (rc) {
+ device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ return rc;
+ }
+
+ scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
+ scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
+
+ memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
+ memcpy(device->model, &id_phys->model[8], sizeof(device->model));
+
+ device->box_index = id_phys->box_index;
+ device->phys_box_on_bus = id_phys->phys_box_on_bus;
+ device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
+ device->queue_depth =
+ get_unaligned_le16(&id_phys->current_queue_depth_limit);
+ device->active_path_index = id_phys->active_path_number;
+ device->path_map = id_phys->redundant_path_present_map;
+ memcpy(&device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(device->box));
+ memcpy(&device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+ device->lun_count = id_phys->multi_lun_device_lun_count;
+ if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
+ id_phys->phy_count)
+ device->phy_id =
+ id_phys->phy_to_phy_map[device->active_path_index];
+ else
+ device->phy_id = 0xFF;
+
+ device->ncq_prio_support =
+ ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
+ PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+
+ device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
+
+ return 0;
+}
+
+static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
- unsigned int retries;
-
- if (device->is_expander_smp_device)
- return 0;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- for (retries = 0;;) {
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
- buffer, 64);
- if (rc == 0)
- break;
- if (pqi_is_logical_device(device) ||
- rc != PQI_CMD_STATUS_ABORTED ||
- ++retries > PQI_INQUIRY_PAGE0_RETRIES)
- goto out;
- }
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+ if (rc)
+ goto out;
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -1359,7 +1715,7 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
memcpy(device->model, &buffer[16], sizeof(device->model));
- if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+ if (device->devtype == TYPE_DISK) {
if (device->is_external_raid_device) {
device->raid_level = SA_RAID_UNKNOWN;
device->volume_status = CISS_LV_OK;
@@ -1371,47 +1727,56 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
- if (pqi_get_device_id(ctrl_info, device->scsi3addr,
- device->unique_id, sizeof(device->unique_id)) < 0)
- dev_warn(&ctrl_info->pci_dev->dev,
- "Can't get device id for scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target,
- device->lun);
-
out:
kfree(buffer);
return rc;
}
-static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+/*
+ * Prevent adding drive to OS for some corner cases such as a drive
+ * undergoing a sanitize (erase) operation. Some OSes will continue to poll
+ * the drive until the sanitize completes, which can take hours,
+ * resulting in long bootup delays. Commands such as TUR, READ_CAP
+ * are allowed, but READ/WRITE cause check condition. So the OS
+ * cannot check/read the partition table.
+ * Note: devices that have completed sanitize must be re-enabled
+ * using the management utility.
+ */
+static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
+{
+ return device->erase_in_progress;
+}
+
+static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
{
int rc;
- memset(id_phys, 0, sizeof(*id_phys));
+ if (device->is_expander_smp_device)
+ return 0;
- rc = pqi_identify_physical_device(ctrl_info, device,
- id_phys, sizeof(*id_phys));
- if (rc) {
- device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
- return;
- }
+ if (pqi_is_logical_device(device))
+ rc = pqi_get_logical_device_info(ctrl_info, device);
+ else
+ rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
- device->queue_depth =
- get_unaligned_le16(&id_phys->current_queue_depth_limit);
- device->device_type = id_phys->device_type;
- device->active_path_index = id_phys->active_path_number;
- device->path_map = id_phys->redundant_path_present_map;
- memcpy(&device->box,
- &id_phys->alternate_paths_phys_box_on_port,
- sizeof(device->box));
- memcpy(&device->phys_connector,
- &id_phys->alternate_paths_phys_connector,
- sizeof(device->phys_connector));
- device->bay = id_phys->phys_bay_in_box;
+ return rc;
+}
+
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+
+ rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
+
+ if (rc == 0 && device->lun_count == 0)
+ device->lun_count = 1;
+
+ return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
@@ -1541,28 +1906,30 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
return rc;
}
-#define PQI_PENDING_IO_TIMEOUT_SECS 20
+#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
-static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
+ int lun;
- pqi_device_remove_start(device);
-
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_PENDING_IO_TIMEOUT_SECS);
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun,
- atomic_read(&device->scsi_cmds_outstanding));
+ for (lun = 0; lun < device->lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, lun,
+ atomic_read(&device->scsi_cmds_outstanding[lun]));
+ }
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
else
pqi_remove_sas_device(device);
+
+ pqi_device_remove_start(device);
}
/* Assumes the SCSI device list lock is held. */
@@ -1572,26 +1939,22 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
- if (device->bus == bus && device->target == target &&
- device->lun == lun)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->bus == bus && device->target == target && device->lun == lun)
return device;
return NULL;
}
-static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
- struct pqi_scsi_dev *dev2)
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
if (dev1->is_physical_device != dev2->is_physical_device)
return false;
if (dev1->is_physical_device)
- return dev1->wwid == dev2->wwid;
+ return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
- return memcmp(dev1->volume_id, dev2->volume_id,
- sizeof(dev1->volume_id)) == 0;
+ return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
@@ -1601,15 +1964,12 @@ enum pqi_find_result {
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device_to_find,
- struct pqi_scsi_dev **matching_device)
+ struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
- device->scsi3addr)) {
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
*matching_device = device;
if (pqi_device_equal(device_to_find, device)) {
if (device_to_find->volume_offline)
@@ -1639,32 +1999,47 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
ssize_t count;
char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
- count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+ count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
if (device->target_lun_valid)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"%d:%d",
device->target,
device->lun);
else
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"-:-");
- if (pqi_is_logical_device(device))
- count += snprintf(buffer + count,
+ if (pqi_is_logical_device(device)) {
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" %08x%08x",
*((u32 *)&device->scsi3addr),
*((u32 *)&device->scsi3addr[4]));
- else
- count += snprintf(buffer + count,
+ } else if (ctrl_info->rpl_extended_format_4_5_supported) {
+ if (device->device_type == SA_DEVICE_TYPE_NVME)
+ count += scnprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx%016llx",
+ get_unaligned_be64(&device->wwid[0]),
+ get_unaligned_be64(&device->wwid[8]));
+ else
+ count += scnprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx",
+ get_unaligned_be64(&device->wwid[0]));
+ } else {
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
- " %016llx", device->sas_address);
+ " %016llx",
+ get_unaligned_be64(&device->wwid[0]));
+ }
- count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+
+ count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
" %s %.8s %.16s ",
pqi_device_type(device),
device->vendor,
@@ -1672,19 +2047,19 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
if (pqi_is_logical_device(device)) {
if (device->devtype == TYPE_DISK)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"SSDSmartPathCap%c En%c %-12s",
device->raid_bypass_configured ? '+' : '-',
device->raid_bypass_enabled ? '+' : '-',
pqi_raid_level_to_string(device->raid_level));
} else {
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
"AIO%c", device->aio_enabled ? '+' : '-');
if (device->devtype == TYPE_DISK ||
device->devtype == TYPE_ZBC)
- count += snprintf(buffer + count,
+ count += scnprintf(buffer + count,
PQI_DEV_INFO_BUFFER_LENGTH - count,
" qd=%-6d", device->queue_depth);
}
@@ -1692,12 +2067,28 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
+static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
+{
+ u32 raid_map1_size;
+ u32 raid_map2_size;
+
+ if (raid_map1 == NULL || raid_map2 == NULL)
+ return raid_map1 == raid_map2;
+
+ raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
+ raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
+
+ if (raid_map1_size != raid_map2_size)
+ return false;
+
+ return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
+}
+
/* Assumes the SCSI device list lock is held. */
-static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
- struct pqi_scsi_dev *new_device)
+static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
- existing_device->devtype = new_device->devtype;
existing_device->device_type = new_device->device_type;
existing_device->bus = new_device->bus;
if (new_device->target_lun_valid) {
@@ -1709,43 +2100,53 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->is_external_raid_device =
- new_device->is_external_raid_device;
- existing_device->is_expander_smp_device =
- new_device->is_expander_smp_device;
- existing_device->aio_enabled = new_device->aio_enabled;
- memcpy(existing_device->vendor, new_device->vendor,
- sizeof(existing_device->vendor));
- memcpy(existing_device->model, new_device->model,
- sizeof(existing_device->model));
+ memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
existing_device->sas_address = new_device->sas_address;
- existing_device->raid_level = new_device->raid_level;
existing_device->queue_depth = new_device->queue_depth;
- existing_device->aio_handle = new_device->aio_handle;
- existing_device->volume_status = new_device->volume_status;
- existing_device->active_path_index = new_device->active_path_index;
- existing_device->path_map = new_device->path_map;
- existing_device->bay = new_device->bay;
- memcpy(existing_device->box, new_device->box,
- sizeof(existing_device->box));
- memcpy(existing_device->phys_connector, new_device->phys_connector,
- sizeof(existing_device->phys_connector));
- existing_device->offload_to_mirror = 0;
- kfree(existing_device->raid_map);
- existing_device->raid_map = new_device->raid_map;
- existing_device->raid_bypass_configured =
- new_device->raid_bypass_configured;
- existing_device->raid_bypass_enabled =
- new_device->raid_bypass_enabled;
existing_device->device_offline = false;
-
- /* To prevent this from being freed later. */
- new_device->raid_map = NULL;
+ existing_device->lun_count = new_device->lun_count;
+
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ }
+ if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
+ existing_device->raid_io_stats = new_device->raid_io_stats;
+ new_device->raid_io_stats = NULL;
+ }
+ existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
+ }
+ } else {
+ existing_device->aio_enabled = new_device->aio_enabled;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+ }
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
if (device) {
+ free_percpu(device->raid_io_stats);
kfree(device->raid_map);
kfree(device);
}
@@ -1777,6 +2178,29 @@ static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
return device->sdev != NULL;
}
+static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
+{
+ unsigned int lun;
+ struct pqi_tmf_work *tmf_work;
+
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
+}
+
+static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
+{
+ if (pqi_device_in_remove(device))
+ return false;
+
+ if (device->sdev == NULL)
+ return false;
+
+ if (!scsi_device_online(device->sdev))
+ return false;
+
+ return device->rescan;
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -1800,15 +2224,14 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
/* Assume that all devices in the existing list have gone away. */
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
device->device_gone = true;
for (i = 0; i < num_new_devices; i++) {
device = new_device_list[i];
find_result = pqi_scsi_find_entry(ctrl_info, device,
- &matching_device);
+ &matching_device);
switch (find_result) {
case DEVICE_SAME:
@@ -1818,7 +2241,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
device->new_device = false;
matching_device->device_gone = false;
- pqi_scsi_update_device(matching_device, device);
+ pqi_scsi_update_device(ctrl_info, matching_device, device);
break;
case DEVICE_NOT_FOUND:
/*
@@ -1858,16 +2281,26 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_add_tail(&device->add_list_entry, &add_list);
/* To prevent this device structure from being freed later. */
device->keep_device = true;
+ pqi_init_device_tmf_work(device);
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- if (pqi_ctrl_in_ofa(ctrl_info))
- pqi_ctrl_ofa_done(ctrl_info);
+ /*
+ * If OFA is in progress and there are devices that need to be deleted,
+ * allow any pending reset operations to continue and unblock any SCSI
+ * requests before removal.
+ */
+ if (pqi_ofa_in_progress(ctrl_info)) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
+ if (pqi_is_device_added(device))
+ pqi_device_remove_start(device);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ }
/* Remove all devices that have gone away. */
- list_for_each_entry_safe(device, next, &delete_list,
- delete_list_entry) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
@@ -1881,25 +2314,37 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
/*
- * Notify the SCSI ML if the queue depth of any existing device has
- * changed.
+ * Notify the SML of any existing device changes, such as
+ * queue depth or device size.
*/
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->sdev && device->queue_depth !=
- device->advertised_queue_depth) {
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ /*
+ * Check for queue depth change.
+ */
+ if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
- scsi_change_queue_depth(device->sdev,
- device->advertised_queue_depth);
+ scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
+ }
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ /*
+ * Check for changes in the device, such as size.
+ */
+ if (pqi_volume_rescan_needed(device)) {
+ device->rescan = false;
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ scsi_rescan_device(device->sdev);
+ } else {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
}
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
if (!pqi_is_device_added(device)) {
- pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
- if (rc) {
+ if (rc == 0) {
+ pqi_dev_info(ctrl_info, "added", device);
+ } else {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d addition failed, device not added\n",
ctrl_info->scsi_host->host_no,
@@ -1909,38 +2354,22 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
}
+
}
-static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
- bool is_supported;
-
- if (device->is_expander_smp_device)
- return true;
-
- is_supported = false;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_TAPE:
- case TYPE_MEDIUM_CHANGER:
- case TYPE_ENCLOSURE:
- is_supported = true;
- break;
- case TYPE_RAID:
- /*
- * Only support the HBA controller itself as a RAID
- * controller. If it's a RAID controller other than
- * the HBA itself (an external RAID controller, for
- * example), we don't support it.
- */
- if (pqi_is_hba_lunid(device->scsi3addr))
- is_supported = true;
- break;
- }
+ /*
+ * Only support the HBA controller itself as a RAID
+ * controller. If it's a RAID controller other than
+ * the HBA itself (an external RAID controller, for
+ * example), we don't support it.
+ */
+ if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
+ !pqi_is_hba_lunid(device->scsi3addr))
+ return false;
- return is_supported;
+ return true;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
@@ -1952,28 +2381,14 @@ static inline bool pqi_skip_device(u8 *scsi3addr)
return false;
}
-static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+static inline void pqi_mask_device(u8 *scsi3addr)
{
- if (!device->is_physical_device)
- return false;
-
- if (device->is_expander_smp_device)
- return true;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_ENCLOSURE:
- return true;
- }
-
- return false;
+ scsi3addr[3] |= 0xc0;
}
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
- return !device->is_physical_device ||
- !pqi_skip_device(device->scsi3addr);
+ return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -1981,10 +2396,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
int i;
int rc;
LIST_HEAD(new_device_list_head);
- struct report_phys_lun_extended *physdev_list = NULL;
- struct report_log_lun_extended *logdev_list = NULL;
- struct report_phys_lun_extended_entry *phys_lun_ext_entry;
- struct report_log_lun_extended_entry *log_lun_ext_entry;
+ struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
+ struct report_log_lun_list *logdev_list = NULL;
+ struct report_phys_lun_16byte_wwid *phys_lun;
+ struct report_log_lun *log_lun;
struct bmic_identify_physical_device *id_phys = NULL;
u32 num_physicals;
u32 num_logicals;
@@ -1995,6 +2410,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
unsigned int num_valid_devices;
bool is_physical_device;
u8 *scsi3addr;
+ unsigned int physical_index;
+ unsigned int logical_index;
static char *out_of_memory_msg =
"failed to allocate memory, device discovery stopped";
@@ -2030,8 +2447,22 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = -ENOMEM;
goto out;
}
+
+ if (pqi_hide_vsep) {
+ for (i = num_physicals - 1; i >= 0; i--) {
+ phys_lun = &physdev_list->lun_entries[i];
+ if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
+ pqi_mask_device(phys_lun->lunid);
+ break;
+ }
+ }
+ }
}
+ if (num_logicals &&
+ (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
+ ctrl_info->lv_drive_type_mix_valid = true;
+
num_new_devices = num_physicals + num_logicals;
new_device_list = kmalloc_array(num_new_devices,
@@ -2057,20 +2488,22 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device = NULL;
num_valid_devices = 0;
+ physical_index = 0;
+ logical_index = 0;
for (i = 0; i < num_new_devices; i++) {
- if (i < num_physicals) {
+ if ((!pqi_expose_ld_first && i < num_physicals) ||
+ (pqi_expose_ld_first && i >= num_logicals)) {
is_physical_device = true;
- phys_lun_ext_entry = &physdev_list->lun_entries[i];
- log_lun_ext_entry = NULL;
- scsi3addr = phys_lun_ext_entry->lunid;
+ phys_lun = &physdev_list->lun_entries[physical_index++];
+ log_lun = NULL;
+ scsi3addr = phys_lun->lunid;
} else {
is_physical_device = false;
- phys_lun_ext_entry = NULL;
- log_lun_ext_entry =
- &logdev_list->lun_entries[i - num_physicals];
- scsi3addr = log_lun_ext_entry->lunid;
+ phys_lun = NULL;
+ log_lun = &logdev_list->lun_entries[logical_index++];
+ scsi3addr = log_lun->lunid;
}
if (is_physical_device && pqi_skip_device(scsi3addr))
@@ -2085,16 +2518,19 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
if (is_physical_device) {
- if (phys_lun_ext_entry->device_type ==
- SA_EXPANDER_SMP_DEVICE)
+ device->device_type = phys_lun->device_type;
+ if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
pqi_is_external_raid_addr(scsi3addr);
}
+ if (!pqi_is_supported_device(device))
+ continue;
+
/* Gather information about the device. */
- rc = pqi_get_device_info(ctrl_info, device);
+ rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
out_of_memory_msg);
@@ -2103,9 +2539,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (rc) {
if (device->is_physical_device)
dev_warn(&ctrl_info->pci_dev->dev,
- "obtaining device info failed, skipping physical device %016llx\n",
- get_unaligned_be64(
- &phys_lun_ext_entry->wwid));
+ "obtaining device info failed, skipping physical device %016llx%016llx\n",
+ get_unaligned_be64(&phys_lun->wwid[0]),
+ get_unaligned_be64(&phys_lun->wwid[8]));
else
dev_warn(&ctrl_info->pci_dev->dev,
"obtaining device info failed, skipping logical device %08x%08x\n",
@@ -2115,32 +2551,27 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
- if (!pqi_is_supported_device(device))
+ /* Do not present disks that the OS cannot fully probe. */
+ if (pqi_keep_device_offline(device))
continue;
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
- device->wwid = phys_lun_ext_entry->wwid;
- if ((phys_lun_ext_entry->device_flags &
- REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
- phys_lun_ext_entry->aio_handle) {
- device->aio_enabled = true;
+ memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
+ if ((phys_lun->device_flags &
+ CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
+ phys_lun->aio_handle) {
+ device->aio_enabled = true;
device->aio_handle =
- phys_lun_ext_entry->aio_handle;
- }
- if (device->devtype == TYPE_DISK ||
- device->devtype == TYPE_ZBC) {
- pqi_get_physical_disk_info(ctrl_info,
- device, id_phys);
+ phys_lun->aio_handle;
}
} else {
- memcpy(device->volume_id, log_lun_ext_entry->volume_id,
+ memcpy(device->volume_id, log_lun->volume_id,
sizeof(device->volume_id));
}
- if (pqi_is_device_with_sas_address(device))
- device->sas_address = get_unaligned_be64(&device->wwid);
+ device->sas_address = get_unaligned_be64(&device->wwid[0]);
new_device_list[num_valid_devices++] = device;
}
@@ -2164,42 +2595,25 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
-
- while (1) {
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
- device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
- struct pqi_scsi_dev, scsi_device_list_entry);
- if (device)
- list_del(&device->scsi_device_list_entry);
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!device)
- break;
-
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- pqi_free_device(device);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ int mutex_acquired;
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
- mutex_lock(&ctrl_info->scan_mutex);
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+
+ if (!mutex_acquired) {
+ if (pqi_ctrl_scan_blocked(ctrl_info))
+ return -EBUSY;
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
+ return -EINPROGRESS;
+ }
rc = pqi_update_scsi_devices(ctrl_info);
- if (rc)
+ if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
pqi_schedule_rescan_worker_delayed(ctrl_info);
mutex_unlock(&ctrl_info->scan_mutex);
@@ -2212,8 +2626,6 @@ static void pqi_scan_start(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info;
ctrl_info = shost_to_hba(shost);
- if (pqi_ctrl_in_ofa(ctrl_info))
- return;
pqi_scan_scsi_devices(ctrl_info);
}
@@ -2230,27 +2642,8 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
return !mutex_is_locked(&ctrl_info->scan_mutex);
}
-static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->scan_mutex);
- mutex_unlock(&ctrl_info->scan_mutex);
-}
-
-static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->lun_reset_mutex);
- mutex_unlock(&ctrl_info->lun_reset_mutex);
-}
-
-static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
-{
- mutex_lock(&ctrl_info->ofa_mutex);
- mutex_unlock(&ctrl_info->ofa_mutex);
-}
-
-static inline void pqi_set_encryption_info(
- struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
- u64 first_block)
+static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
+ struct raid_map *raid_map, u64 first_block)
{
u32 volume_blk_size;
@@ -2273,333 +2666,419 @@ static inline void pqi_set_encryption_info(
* Attempt to perform RAID bypass mapping for a logical volume I/O.
*/
+static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ bool is_supported = true;
+
+ switch (rmd->raid_level) {
+ case SA_RAID_0:
+ break;
+ case SA_RAID_1:
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
+ is_supported = false;
+ break;
+ case SA_RAID_TRIPLE:
+ if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
+ is_supported = false;
+ break;
+ case SA_RAID_5:
+ if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_5_6))
+ is_supported = false;
+ break;
+ case SA_RAID_6:
+ if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
+ rmd->data_length > ctrl_info->max_write_raid_5_6))
+ is_supported = false;
+ break;
+ default:
+ is_supported = false;
+ break;
+ }
+
+ return is_supported;
+}
+
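
The new pqi_aio_raid_level_supported() helper gates the bypass path on RAID level: reads stay eligible, while writes are only bypassed when the corresponding enable_r*_writes knob is set and the transfer fits that level's write limit. Below is a minimal userspace sketch of the same decision; the type names and limit values are illustrative stand-ins, not the driver's real capability fields.

/* Illustrative sketch only: limit values stand in for controller capabilities. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum raid_level { RAID_0, RAID_1, RAID_TRIPLE, RAID_5, RAID_6 };

struct write_limits {
	bool r1_writes, r5_writes, r6_writes;
	uint32_t max_r1_2drive, max_r1_3drive, max_r5_6;
};

static bool aio_raid_level_supported(enum raid_level level, bool is_write,
	uint32_t data_length, const struct write_limits *lim)
{
	switch (level) {
	case RAID_0:
		return true;
	case RAID_1:
		return !is_write || (lim->r1_writes && data_length <= lim->max_r1_2drive);
	case RAID_TRIPLE:
		return !is_write || (lim->r1_writes && data_length <= lim->max_r1_3drive);
	case RAID_5:
		return !is_write || (lim->r5_writes && data_length <= lim->max_r5_6);
	case RAID_6:
		return !is_write || (lim->r6_writes && data_length <= lim->max_r5_6);
	default:
		return false;	/* unknown levels never take the bypass path */
	}
}

int main(void)
{
	struct write_limits lim = { true, true, false, 1 << 18, 1 << 18, 1 << 20 };

	printf("RAID 5 1 MiB write eligible: %d\n",
		aio_raid_level_supported(RAID_5, true, 1 << 20, &lim));
	printf("RAID 6 4 KiB write eligible: %d\n",
		aio_raid_level_supported(RAID_6, true, 4096, &lim));
	return 0;
}
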
#define PQI_RAID_BYPASS_INELIGIBLE 1
-static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
+ struct pqi_scsi_dev_raid_map_data *rmd)
{
- struct raid_map *raid_map;
- bool is_write = false;
- u32 map_index;
- u64 first_block;
- u64 last_block;
- u32 block_cnt;
- u32 blocks_per_row;
- u64 first_row;
- u64 last_row;
- u32 first_row_offset;
- u32 last_row_offset;
- u32 first_column;
- u32 last_column;
- u64 r0_first_row;
- u64 r0_last_row;
- u32 r5or6_blocks_per_row;
- u64 r5or6_first_row;
- u64 r5or6_last_row;
- u32 r5or6_first_row_offset;
- u32 r5or6_last_row_offset;
- u32 r5or6_first_column;
- u32 r5or6_last_column;
- u16 data_disks_per_row;
- u32 total_disks_per_row;
- u16 layout_map_count;
- u32 stripesize;
- u16 strip_size;
- u32 first_group;
- u32 last_group;
- u32 current_group;
- u32 map_row;
- u32 aio_handle;
- u64 disk_block;
- u32 disk_block_cnt;
- u8 cdb[16];
- u8 cdb_length;
- int offload_to_mirror;
- struct pqi_encryption_info *encryption_info_ptr;
- struct pqi_encryption_info encryption_info;
-#if BITS_PER_LONG == 32
- u64 tmpdiv;
-#endif
-
/* Check for valid opcode, get LBA and block count. */
switch (scmd->cmnd[0]) {
case WRITE_6:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_6:
- first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+ rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
- block_cnt = (u32)scmd->cmnd[4];
- if (block_cnt == 0)
- block_cnt = 256;
+ rmd->block_cnt = (u32)scmd->cmnd[4];
+ if (rmd->block_cnt == 0)
+ rmd->block_cnt = 256;
break;
case WRITE_10:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_10:
- first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
- block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
break;
case WRITE_12:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_12:
- first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
- block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+ rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
break;
case WRITE_16:
- is_write = true;
- /* fall through */
+ rmd->is_write = true;
+ fallthrough;
case READ_16:
- first_block = get_unaligned_be64(&scmd->cmnd[2]);
- block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+ rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
+ rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
break;
default:
/* Process via normal I/O path. */
return PQI_RAID_BYPASS_INELIGIBLE;
}
- /* Check for write to non-RAID-0. */
- if (is_write && device->raid_level != SA_RAID_0)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
- if (unlikely(block_cnt == 0))
- return PQI_RAID_BYPASS_INELIGIBLE;
+ return 0;
+}
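
pqi_get_aio_lba_and_block_count() only recognizes the fixed READ/WRITE CDB layouts and pulls the big-endian LBA and transfer length out of them; anything else falls back to the normal RAID path. A small standalone sketch of the same decoding for the 10- and 16-byte variants (the 6- and 12-byte forms are omitted here, and the helper names are invented for the example):

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Decode LBA/length from READ(10)/WRITE(10) and READ(16)/WRITE(16) CDBs. */
static int decode_cdb(const uint8_t *cdb, uint64_t *lba, uint32_t *blocks)
{
	switch (cdb[0]) {
	case 0x28: case 0x2a:			/* READ(10) / WRITE(10) */
		*lba = get_be32(&cdb[2]);
		*blocks = get_be16(&cdb[7]);
		return 0;
	case 0x88: case 0x8a:			/* READ(16) / WRITE(16) */
		*lba = get_be64(&cdb[2]);
		*blocks = get_be32(&cdb[10]);
		return 0;
	default:
		return -1;			/* not bypass-eligible */
	}
}

int main(void)
{
	/* READ(10): LBA 0x12345678, 16 blocks */
	uint8_t cdb[16] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0x00, 0x10, 0 };
	uint64_t lba;
	uint32_t blocks;

	if (decode_cdb(cdb, &lba, &blocks) == 0)
		printf("lba=0x%llx blocks=%u\n", (unsigned long long)lba, blocks);
	return 0;
}
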
- last_block = first_block + block_cnt - 1;
- raid_map = device->raid_map;
+static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
+{
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+
+ rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
/* Check for invalid block or wraparound. */
- if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
- last_block < first_block)
+ if (rmd->last_block >=
+ get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+ rmd->last_block < rmd->first_block)
return PQI_RAID_BYPASS_INELIGIBLE;
- data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
- strip_size = get_unaligned_le16(&raid_map->strip_size);
- layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+ rmd->data_disks_per_row =
+ get_unaligned_le16(&raid_map->data_disks_per_row);
+ rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
+ rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
/* Calculate stripe information for the request. */
- blocks_per_row = data_disks_per_row * strip_size;
+ rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
+ if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+ return PQI_RAID_BYPASS_INELIGIBLE;
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- do_div(tmpdiv, blocks_per_row);
- first_row = tmpdiv;
- tmpdiv = last_block;
- do_div(tmpdiv, blocks_per_row);
- last_row = tmpdiv;
- first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
- last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
- tmpdiv = first_row_offset;
- do_div(tmpdiv, strip_size);
- first_column = tmpdiv;
- tmpdiv = last_row_offset;
- do_div(tmpdiv, strip_size);
- last_column = tmpdiv;
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->first_row = tmpdiv;
+ tmpdiv = rmd->last_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->last_row = tmpdiv;
+ rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
+ rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
+ tmpdiv = rmd->first_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->first_column = tmpdiv;
+ tmpdiv = rmd->last_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->last_column = tmpdiv;
#else
- first_row = first_block / blocks_per_row;
- last_row = last_block / blocks_per_row;
- first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
- last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
- first_column = first_row_offset / strip_size;
- last_column = last_row_offset / strip_size;
+ rmd->first_row = rmd->first_block / rmd->blocks_per_row;
+ rmd->last_row = rmd->last_block / rmd->blocks_per_row;
+ rmd->first_row_offset = (u32)(rmd->first_block -
+ (rmd->first_row * rmd->blocks_per_row));
+ rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
+ rmd->blocks_per_row));
+ rmd->first_column = rmd->first_row_offset / rmd->strip_size;
+ rmd->last_column = rmd->last_row_offset / rmd->strip_size;
#endif
/* If this isn't a single row/column then give to the controller. */
- if (first_row != last_row || first_column != last_column)
+ if (rmd->first_row != rmd->last_row ||
+ rmd->first_column != rmd->last_column)
return PQI_RAID_BYPASS_INELIGIBLE;
/* Proceeding with driver mapping. */
- total_disks_per_row = data_disks_per_row +
+ rmd->total_disks_per_row = rmd->data_disks_per_row +
get_unaligned_le16(&raid_map->metadata_disks_per_row);
- map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+ rmd->map_row = ((u32)(rmd->first_row >>
+ raid_map->parity_rotation_shift)) %
get_unaligned_le16(&raid_map->row_cnt);
- map_index = (map_row * total_disks_per_row) + first_column;
+ rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
+ rmd->first_column;
- /* RAID 1 */
- if (device->raid_level == SA_RAID_1) {
- if (device->offload_to_mirror)
- map_index += data_disks_per_row;
- device->offload_to_mirror = !device->offload_to_mirror;
- } else if (device->raid_level == SA_RAID_ADM) {
- /* RAID ADM */
- /*
- * Handles N-way mirrors (R1-ADM) and R10 with # of drives
- * divisible by 3.
- */
- offload_to_mirror = device->offload_to_mirror;
- if (offload_to_mirror == 0) {
- /* use physical disk in the first mirrored group. */
- map_index %= data_disks_per_row;
- } else {
- do {
- /*
- * Determine mirror group that map_index
- * indicates.
- */
- current_group = map_index / data_disks_per_row;
-
- if (offload_to_mirror != current_group) {
- if (current_group <
- layout_map_count - 1) {
- /*
- * Select raid index from
- * next group.
- */
- map_index += data_disks_per_row;
- current_group++;
- } else {
- /*
- * Select raid index from first
- * group.
- */
- map_index %= data_disks_per_row;
- current_group = 0;
- }
- }
- } while (offload_to_mirror != current_group);
- }
+ return 0;
+}
- /* Set mirror group to use next time. */
- offload_to_mirror =
- (offload_to_mirror >= layout_map_count - 1) ?
- 0 : offload_to_mirror + 1;
- WARN_ON(offload_to_mirror >= layout_map_count);
- device->offload_to_mirror = offload_to_mirror;
- /*
- * Avoid direct use of device->offload_to_mirror within this
- * function since multiple threads might simultaneously
- * increment it beyond the range of device->layout_map_count -1.
- */
- } else if ((device->raid_level == SA_RAID_5 ||
- device->raid_level == SA_RAID_6) && layout_map_count > 1) {
- /* RAID 50/60 */
- /* Verify first and last block are in same RAID group */
- r5or6_blocks_per_row = strip_size * data_disks_per_row;
- stripesize = r5or6_blocks_per_row * layout_map_count;
+static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
+ struct raid_map *raid_map)
+{
+#if BITS_PER_LONG == 32
+ u64 tmpdiv;
+#endif
+
+ if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* RAID 50/60 */
+ /* Verify first and last block are in same RAID group. */
+ rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- first_group = do_div(tmpdiv, stripesize);
- tmpdiv = first_group;
- do_div(tmpdiv, r5or6_blocks_per_row);
- first_group = tmpdiv;
- tmpdiv = last_block;
- last_group = do_div(tmpdiv, stripesize);
- tmpdiv = last_group;
- do_div(tmpdiv, r5or6_blocks_per_row);
- last_group = tmpdiv;
+ tmpdiv = rmd->first_block;
+ rmd->first_group = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->first_group;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->first_group = tmpdiv;
+ tmpdiv = rmd->last_block;
+ rmd->last_group = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->last_group;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->last_group = tmpdiv;
#else
- first_group = (first_block % stripesize) / r5or6_blocks_per_row;
- last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+ rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
+ rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
#endif
- if (first_group != last_group)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ if (rmd->first_group != rmd->last_group)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Verify request is in a single row of RAID 5/6 */
+ /* Verify request is in a single row of RAID 5/6. */
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- do_div(tmpdiv, stripesize);
- first_row = r5or6_first_row = r0_first_row = tmpdiv;
- tmpdiv = last_block;
- do_div(tmpdiv, stripesize);
- r5or6_last_row = r0_last_row = tmpdiv;
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->stripesize);
+ rmd->first_row = tmpdiv;
+ rmd->r5or6_first_row = tmpdiv;
+ tmpdiv = rmd->last_block;
+ do_div(tmpdiv, rmd->stripesize);
+ rmd->r5or6_last_row = tmpdiv;
#else
- first_row = r5or6_first_row = r0_first_row =
- first_block / stripesize;
- r5or6_last_row = r0_last_row = last_block / stripesize;
+ rmd->first_row = rmd->r5or6_first_row =
+ rmd->first_block / rmd->stripesize;
+ rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
#endif
- if (r5or6_first_row != r5or6_last_row)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ if (rmd->r5or6_first_row != rmd->r5or6_last_row)
+ return PQI_RAID_BYPASS_INELIGIBLE;
- /* Verify request is in a single column */
+ /* Verify request is in a single column. */
#if BITS_PER_LONG == 32
- tmpdiv = first_block;
- first_row_offset = do_div(tmpdiv, stripesize);
- tmpdiv = first_row_offset;
- first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
- r5or6_first_row_offset = first_row_offset;
- tmpdiv = last_block;
- r5or6_last_row_offset = do_div(tmpdiv, stripesize);
- tmpdiv = r5or6_last_row_offset;
- r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
- tmpdiv = r5or6_first_row_offset;
- do_div(tmpdiv, strip_size);
- first_column = r5or6_first_column = tmpdiv;
- tmpdiv = r5or6_last_row_offset;
- do_div(tmpdiv, strip_size);
- r5or6_last_column = tmpdiv;
+ tmpdiv = rmd->first_block;
+ rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->first_row_offset;
+ rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->r5or6_first_row_offset = rmd->first_row_offset;
+ tmpdiv = rmd->last_block;
+ rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
+ tmpdiv = rmd->r5or6_last_row_offset;
+ rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
+ tmpdiv = rmd->r5or6_first_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->first_column = rmd->r5or6_first_column = tmpdiv;
+ tmpdiv = rmd->r5or6_last_row_offset;
+ do_div(tmpdiv, rmd->strip_size);
+ rmd->r5or6_last_column = tmpdiv;
#else
- first_row_offset = r5or6_first_row_offset =
- (u32)((first_block % stripesize) %
- r5or6_blocks_per_row);
+ rmd->first_row_offset = rmd->r5or6_first_row_offset =
+ (u32)((rmd->first_block % rmd->stripesize) %
+ rmd->blocks_per_row);
+
+ rmd->r5or6_last_row_offset =
+ (u32)((rmd->last_block % rmd->stripesize) %
+ rmd->blocks_per_row);
+
+ rmd->first_column =
+ rmd->r5or6_first_row_offset / rmd->strip_size;
+ rmd->r5or6_first_column = rmd->first_column;
+ rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
+#endif
+ if (rmd->r5or6_first_column != rmd->r5or6_last_column)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ /* Request is eligible. */
+ rmd->map_row =
+ ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
+ get_unaligned_le16(&raid_map->row_cnt);
+
+ rmd->map_index = (rmd->first_group *
+ (get_unaligned_le16(&raid_map->row_cnt) *
+ rmd->total_disks_per_row)) +
+ (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
- r5or6_last_row_offset =
- (u32)((last_block % stripesize) %
- r5or6_blocks_per_row);
+ if (rmd->is_write) {
+ u32 index;
- first_column = r5or6_first_row_offset / strip_size;
- r5or6_first_column = first_column;
- r5or6_last_column = r5or6_last_row_offset / strip_size;
+ /*
+ * p_parity_it_nexus and q_parity_it_nexus are the IT nexus (AIO)
+ * handles of the parity entries inside the device's raid_map.
+ *
+ * A device's RAID map is bounded by the number of RAID disks squared.
+ *
+ * The device's RAID map size is checked during device
+ * initialization.
+ */
+ index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
+ index *= rmd->total_disks_per_row;
+ index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
+
+ rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
+ if (rmd->raid_level == SA_RAID_6) {
+ rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
+ rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
+ }
+#if BITS_PER_LONG == 32
+ tmpdiv = rmd->first_block;
+ do_div(tmpdiv, rmd->blocks_per_row);
+ rmd->row = tmpdiv;
+#else
+ rmd->row = rmd->first_block / rmd->blocks_per_row;
#endif
- if (r5or6_first_column != r5or6_last_column)
- return PQI_RAID_BYPASS_INELIGIBLE;
+ }
- /* Request is eligible */
- map_row =
- ((u32)(first_row >> raid_map->parity_rotation_shift)) %
- get_unaligned_le16(&raid_map->row_cnt);
+ return 0;
+}
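
The common map-value calculation is plain integer arithmetic: divide by blocks_per_row to get the row, take the remainder to get the row offset, and divide by strip_size to get the column; the request is only bypass-eligible when the first and last block land in the same row and column. A worked sketch with illustrative geometry (strip_size = 128 blocks, 4 data disks per row, an 8-block request at LBA 1000):

#include <stdint.h>
#include <stdio.h>

struct stripe_loc {
	uint64_t row;
	uint32_t column;
};

/* Map a logical block onto (row, column) within a striped layout. */
static struct stripe_loc locate(uint64_t block, uint32_t strip_size,
	uint16_t data_disks_per_row)
{
	uint32_t blocks_per_row = strip_size * data_disks_per_row;
	struct stripe_loc loc;

	loc.row = block / blocks_per_row;
	loc.column = (uint32_t)(block - loc.row * blocks_per_row) / strip_size;
	return loc;
}

int main(void)
{
	uint64_t first = 1000, last = 1000 + 8 - 1;
	struct stripe_loc a = locate(first, 128, 4);
	struct stripe_loc b = locate(last, 128, 4);

	/* first block: 1000 / 512 = row 1, offset 488, 488 / 128 = column 3 */
	printf("first: row=%llu col=%u\n", (unsigned long long)a.row, a.column);
	printf("last:  row=%llu col=%u\n", (unsigned long long)b.row, b.column);
	printf("bypass-eligible: %s\n",
		(a.row == b.row && a.column == b.column) ? "yes" : "no");
	return 0;
}
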
- map_index = (first_group *
- (get_unaligned_le16(&raid_map->row_cnt) *
- total_disks_per_row)) +
- (map_row * total_disks_per_row) + first_column;
+static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ /* Build the new CDB for the physical disk I/O. */
+ if (rmd->disk_block > 0xffffffff) {
+ rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
+ rmd->cdb[1] = 0;
+ put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
+ put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
+ rmd->cdb[14] = 0;
+ rmd->cdb[15] = 0;
+ rmd->cdb_length = 16;
+ } else {
+ rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
+ rmd->cdb[1] = 0;
+ put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
+ rmd->cdb[6] = 0;
+ put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
+ rmd->cdb[9] = 0;
+ rmd->cdb_length = 10;
}
+}
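
pqi_set_aio_cdb() picks the 10-byte CDB while the physical-disk LBA still fits in 32 bits and switches to the 16-byte form otherwise. A condensed sketch of that selection, building the CDB bytes by hand (opcodes per SBC; the helper names are invented for the example):

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32((uint32_t)(v >> 32), p);
	put_be32((uint32_t)v, p + 4);
}

/* Build a READ/WRITE CDB for a physical-disk LBA; returns the CDB length. */
static int build_cdb(uint8_t cdb[16], uint64_t lba, uint32_t blocks, int is_write)
{
	if (lba > 0xffffffffULL) {
		cdb[0] = is_write ? 0x8a : 0x88;	/* WRITE(16) / READ(16) */
		put_be64(lba, &cdb[2]);
		put_be32(blocks, &cdb[10]);
		return 16;
	}
	cdb[0] = is_write ? 0x2a : 0x28;		/* WRITE(10) / READ(10) */
	put_be32((uint32_t)lba, &cdb[2]);
	cdb[7] = (uint8_t)(blocks >> 8);
	cdb[8] = (uint8_t)blocks;
	return 10;
}

int main(void)
{
	uint8_t cdb[16] = { 0 };
	int len = build_cdb(cdb, 0x1ffffffffULL, 8, 0);

	printf("len=%d opcode=0x%02x\n", len, cdb[0]);	/* len=16 opcode=0x88 */
	return 0;
}
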
- aio_handle = raid_map->disk_data[map_index].aio_handle;
- disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
- first_row * strip_size +
- (first_row_offset - first_column * strip_size);
- disk_block_cnt = block_cnt;
+static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ u32 index;
+ u32 group;
+
+ group = rmd->map_index / rmd->data_disks_per_row;
+
+ index = rmd->map_index - (group * rmd->data_disks_per_row);
+ rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
+ index += rmd->data_disks_per_row;
+ rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
+ if (rmd->layout_map_count > 2) {
+ index += rmd->data_disks_per_row;
+ rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
+ }
+
+ rmd->num_it_nexus_entries = rmd->layout_map_count;
+}
+
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ int rc;
+ struct raid_map *raid_map;
+ u32 group;
+ u32 next_bypass_group;
+ struct pqi_encryption_info *encryption_info_ptr;
+ struct pqi_encryption_info encryption_info;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+
+ rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ rmd.raid_level = device->raid_level;
+
+ if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (unlikely(rmd.block_cnt == 0))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ raid_map = device->raid_map;
+
+ rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ if (device->raid_level == SA_RAID_1 ||
+ device->raid_level == SA_RAID_TRIPLE) {
+ if (rmd.is_write) {
+ pqi_calc_aio_r1_nexus(raid_map, &rmd);
+ } else {
+ group = device->next_bypass_group[rmd.map_index];
+ next_bypass_group = group + 1;
+ if (next_bypass_group >= rmd.layout_map_count)
+ next_bypass_group = 0;
+ device->next_bypass_group[rmd.map_index] = next_bypass_group;
+ rmd.map_index += group * rmd.data_disks_per_row;
+ }
+ } else if ((device->raid_level == SA_RAID_5 ||
+ device->raid_level == SA_RAID_6) &&
+ (rmd.layout_map_count > 1 || rmd.is_write)) {
+ rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
+ if (rc)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+ }
+
+ if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
+ return PQI_RAID_BYPASS_INELIGIBLE;
+
+ rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
+ rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+ rmd.first_row * rmd.strip_size +
+ (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
+ rmd.disk_block_cnt = rmd.block_cnt;
/* Handle differing logical/physical block sizes. */
if (raid_map->phys_blk_shift) {
- disk_block <<= raid_map->phys_blk_shift;
- disk_block_cnt <<= raid_map->phys_blk_shift;
+ rmd.disk_block <<= raid_map->phys_blk_shift;
+ rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
}
- if (unlikely(disk_block_cnt > 0xffff))
+ if (unlikely(rmd.disk_block_cnt > 0xffff))
return PQI_RAID_BYPASS_INELIGIBLE;
- /* Build the new CDB for the physical disk I/O. */
- if (disk_block > 0xffffffff) {
- cdb[0] = is_write ? WRITE_16 : READ_16;
- cdb[1] = 0;
- put_unaligned_be64(disk_block, &cdb[2]);
- put_unaligned_be32(disk_block_cnt, &cdb[10]);
- cdb[14] = 0;
- cdb[15] = 0;
- cdb_length = 16;
- } else {
- cdb[0] = is_write ? WRITE_10 : READ_10;
- cdb[1] = 0;
- put_unaligned_be32((u32)disk_block, &cdb[2]);
- cdb[6] = 0;
- put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
- cdb[9] = 0;
- cdb_length = 10;
- }
-
- if (get_unaligned_le16(&raid_map->flags) &
- RAID_MAP_ENCRYPTION_ENABLED) {
- pqi_set_encryption_info(&encryption_info, raid_map,
- first_block);
+ pqi_set_aio_cdb(&rmd);
+
+ if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
+ if (rmd.data_length > device->max_transfer_encrypted)
+ return PQI_RAID_BYPASS_INELIGIBLE;
+ pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
encryption_info_ptr = &encryption_info;
} else {
encryption_info_ptr = NULL;
}
- return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
- cdb, cdb_length, queue_group, encryption_info_ptr, true);
+ if (rmd.is_write) {
+ switch (device->raid_level) {
+ case SA_RAID_1:
+ case SA_RAID_TRIPLE:
+ return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
+ encryption_info_ptr, device, &rmd);
+ case SA_RAID_5:
+ case SA_RAID_6:
+ return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
+ encryption_info_ptr, device, &rmd);
+ }
+ }
+
+ return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+ rmd.cdb, rmd.cdb_length, queue_group,
+ encryption_info_ptr, true, false);
}
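
For RAID 1 and RAID Triple reads, the bypass path keeps a per-map-entry counter (next_bypass_group) so successive reads of the same entry rotate across the mirror legs; the chosen copy sits at map_index plus group * data_disks_per_row. A toy sketch of that rotation, with made-up array sizes:

#include <stdint.h>
#include <stdio.h>

#define NUM_MAP_ENTRIES 4

/* Pick a mirror leg for a read and advance the per-entry round-robin state. */
static unsigned int pick_mirror_leg(uint8_t next_group[], unsigned int map_index,
	unsigned int layout_map_count, unsigned int data_disks_per_row)
{
	unsigned int group = next_group[map_index];

	next_group[map_index] = (group + 1 >= layout_map_count) ? 0 : group + 1;
	return map_index + group * data_disks_per_row;
}

int main(void)
{
	uint8_t next_group[NUM_MAP_ENTRIES] = { 0 };

	/* Three-way mirror (layout_map_count = 3), 2 data disks per row. */
	for (int i = 0; i < 5; i++)
		printf("read %d -> disk entry %u\n", i,
			pick_mirror_leg(next_group, 1, 3, 2));
	return 0;
}
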
#define PQI_STATUS_IDLE 0x0
@@ -2624,7 +3103,7 @@ static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
u8 status;
pqi_registers = ctrl_info->pqi_registers;
- timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
signature = readq(&pqi_registers->signature);
@@ -2762,10 +3241,25 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
scsi_normalize_sense(error_info->data,
sense_data_length, &sshdr) &&
sshdr.sense_key == HARDWARE_ERROR &&
- sshdr.asc == 0x3e &&
- sshdr.ascq == 0x1) {
- pqi_take_device_offline(scmd->device, "RAID");
- host_byte = DID_NO_CONNECT;
+ sshdr.asc == 0x3e) {
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
+ struct pqi_scsi_dev *device = scmd->device->hostdata;
+
+ switch (sshdr.ascq) {
+ case 0x1: /* LOGICAL UNIT FAILURE */
+ if (printk_ratelimit())
+ scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+ pqi_take_device_offline(scmd->device, "RAID");
+ host_byte = DID_NO_CONNECT;
+ break;
+
+ default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
+ if (printk_ratelimit())
+ scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
+ sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+ break;
+ }
}
if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
@@ -2774,6 +3268,20 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sense_data_length);
}
+ if (pqi_cmd_priv(scmd)->this_residual &&
+ !pqi_is_logical_device(scmd->device->hostdata) &&
+ scsi_status == SAM_STAT_CHECK_CONDITION &&
+ host_byte == DID_OK &&
+ sense_data_length &&
+ scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
+ sshdr.sense_key == ILLEGAL_REQUEST &&
+ sshdr.asc == 0x26 &&
+ sshdr.ascq == 0x0) {
+ host_byte = DID_NO_CONNECT;
+ pqi_take_device_offline(scmd->device, "AIO");
+ scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
+ }
+
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
}
@@ -2861,8 +3369,7 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
}
if (device_offline && sense_data_length == 0)
- scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
- 0x3e, 0x1);
+ scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
@@ -2881,7 +3388,7 @@ static void pqi_process_io_error(unsigned int iu_type,
}
}
-static int pqi_interpret_task_management_response(
+static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
struct pqi_task_management_response *response)
{
int rc;
@@ -2894,18 +3401,30 @@ static int pqi_interpret_task_management_response(
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
+ case SOP_TMF_INCORRECT_LOGICAL_UNIT:
+ rc = -ENODEV;
+ break;
default:
rc = -EIO;
break;
}
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
+
return rc;
}
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
+{
+ pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
+}
+
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
{
- unsigned int num_responses;
+ int num_responses;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_io_request *io_request;
@@ -2917,6 +3436,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
while (1) {
oq_pi = readl(queue_group->oq_pi);
+ if (oq_pi >= ctrl_info->num_elements_per_oq) {
+ pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
+ return -1;
+ }
if (oq_pi == oq_ci)
break;
@@ -2925,29 +3451,39 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
request_id = get_unaligned_le16(&response->request_id);
- WARN_ON(request_id >= ctrl_info->max_io_slots);
+ if (request_id >= ctrl_info->max_io_slots) {
+ pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
+ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
+ return -1;
+ }
io_request = &ctrl_info->io_request_pool[request_id];
- WARN_ON(atomic_read(&io_request->refcount) == 0);
+ if (atomic_read(&io_request->refcount) == 0) {
+ pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
+ request_id, oq_pi, oq_ci);
+ return -1;
+ }
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
if (io_request->scmd)
io_request->scmd->result = 0;
- /* fall through */
+ fallthrough;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
break;
case PQI_RESPONSE_IU_VENDOR_GENERAL:
io_request->status =
get_unaligned_le16(
- &((struct pqi_vendor_general_response *)
- response)->status);
+ &((struct pqi_vendor_general_response *)response)->status);
break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
- io_request->status =
- pqi_interpret_task_management_response(
- (void *)response);
+ io_request->status = pqi_interpret_task_management_response(ctrl_info,
+ (void *)response);
break;
case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
@@ -2958,24 +3494,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
io_request->error_info = ctrl_info->error_buffer +
(get_unaligned_le16(&response->error_index) *
PQI_ERROR_BUFFER_ELEMENT_LENGTH);
- pqi_process_io_error(response->header.iu_type,
- io_request);
+ pqi_process_io_error(response->header.iu_type, io_request);
break;
default:
+ pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
dev_err(&ctrl_info->pci_dev->dev,
- "unexpected IU type: 0x%x\n",
- response->header.iu_type);
- break;
+ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
+ response->header.iu_type, oq_pi, oq_ci);
+ return -1;
}
- io_request->io_complete_callback(io_request,
- io_request->context);
+ io_request->io_complete_callback(io_request, io_request->context);
/*
* Note that the I/O request structure CANNOT BE TOUCHED after
* returning from the I/O completion callback!
*/
-
oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
}
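
The interrupt path consumes the outbound queue as a fixed-size ring: the controller advances the producer index, the driver advances its consumer index, and an out-of-range producer index or request ID is now treated as fatal (the controller is taken offline) rather than merely tripping a WARN_ON. A condensed userspace sketch of that consumption loop, with a plain array standing in for the controller-owned queue memory:

#include <stdint.h>
#include <stdio.h>

#define NUM_ELEMENTS 8

struct ring {
	uint32_t pi;			/* producer index (written by "firmware") */
	uint32_t ci;			/* consumer index (owned by the driver) */
	int elements[NUM_ELEMENTS];
};

/* Drain completed elements; returns the count, or -1 on a corrupt index. */
static int drain(struct ring *r)
{
	int handled = 0;

	for (;;) {
		uint32_t pi = r->pi;	/* snapshot, as readl() does */

		if (pi >= NUM_ELEMENTS)
			return -1;	/* out-of-range PI: take the controller offline */
		if (pi == r->ci)
			break;		/* ring empty */

		printf("completing element %d\n", r->elements[r->ci]);
		r->ci = (r->ci + 1) % NUM_ELEMENTS;
		handled++;
	}
	return handled;
}

int main(void)
{
	struct ring r = { .pi = 3, .ci = 0, .elements = { 10, 11, 12 } };

	printf("handled %d responses\n", drain(&r));
	return 0;
}
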
@@ -3057,8 +3591,8 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
&request.header.iu_length);
request.event_type = event->event_type;
- request.event_id = event->event_id;
- request.additional_event_id = event->additional_event_id;
+ put_unaligned_le16(event->event_id, &request.event_id);
+ put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
@@ -3069,10 +3603,10 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
struct pqi_ctrl_info *ctrl_info)
{
- unsigned long timeout;
u8 status;
+ unsigned long timeout;
- timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
status = pqi_read_soft_reset_status(ctrl_info);
@@ -3082,133 +3616,218 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
if (status & PQI_SOFT_RESET_ABORT)
return RESET_ABORT;
+ if (!sis_is_firmware_running(ctrl_info))
+ return RESET_NORESPONSE;
+
if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
+ dev_warn(&ctrl_info->pci_dev->dev,
"timed out waiting for soft reset status\n");
return RESET_TIMEDOUT;
}
- if (!sis_is_firmware_running(ctrl_info))
- return RESET_NORESPONSE;
-
ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
}
}
-static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ unsigned int delay_secs;
+ enum pqi_soft_reset_status reset_status;
+
+ if (ctrl_info->soft_reset_handshake_supported)
+ reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
+ else
+ reset_status = RESET_INITIATE_FIRMWARE;
+
+ delay_secs = PQI_POST_RESET_DELAY_SECS;
switch (reset_status) {
- case RESET_INITIATE_DRIVER:
- /* fall through */
case RESET_TIMEDOUT:
+ delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
+ fallthrough;
+ case RESET_INITIATE_DRIVER:
dev_info(&ctrl_info->pci_dev->dev,
- "resetting controller %u\n", ctrl_info->ctrl_id);
+ "Online Firmware Activation: resetting controller\n");
sis_soft_reset(ctrl_info);
- /* fall through */
+ fallthrough;
case RESET_INITIATE_FIRMWARE:
- rc = pqi_ofa_ctrl_restart(ctrl_info);
- pqi_ofa_free_host_buffer(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+ rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
+ pqi_ctrl_ofa_done(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation for controller %u: %s\n",
- ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+ "Online Firmware Activation: %s\n",
+ rc == 0 ? "SUCCESS" : "FAILED");
break;
case RESET_ABORT:
- pqi_ofa_ctrl_unquiesce(ctrl_info);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation for controller %u: %s\n",
- ctrl_info->ctrl_id, "ABORTED");
+ "Online Firmware Activation ABORTED\n");
+ if (ctrl_info->soft_reset_handshake_supported)
+ pqi_clear_soft_reset_status(ctrl_info);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
+ pqi_ctrl_ofa_done(ctrl_info);
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
break;
case RESET_NORESPONSE:
- pqi_ofa_free_host_buffer(ctrl_info);
- pqi_take_ctrl_offline(ctrl_info);
+ fallthrough;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "unexpected Online Firmware Activation reset status: 0x%x\n",
+ reset_status);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
+ pqi_ctrl_ofa_done(ctrl_info);
+ pqi_ofa_ctrl_unquiesce(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
break;
}
}
-static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
- struct pqi_event *event)
+static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
{
- u16 event_id;
- enum pqi_soft_reset_status status;
+ struct pqi_ctrl_info *ctrl_info;
- event_id = get_unaligned_le16(&event->event_id);
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
- mutex_lock(&ctrl_info->ofa_mutex);
+ pqi_ctrl_ofa_start(ctrl_info);
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
+}
- if (event_id == PQI_EVENT_OFA_QUIESCE) {
- dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
- pqi_ofa_ctrl_quiesce(ctrl_info);
- pqi_acknowledge_event(ctrl_info, event);
- if (ctrl_info->soft_reset_handshake_supported) {
- status = pqi_poll_for_soft_reset_status(ctrl_info);
- pqi_process_soft_reset(ctrl_info, status);
- } else {
- pqi_process_soft_reset(ctrl_info,
- RESET_INITIATE_FIRMWARE);
- }
+static void pqi_ofa_quiesce_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_event *event;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
+
+ event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
+
+ pqi_ofa_ctrl_quiesce(ctrl_info);
+ pqi_acknowledge_event(ctrl_info, event);
+ pqi_process_soft_reset(ctrl_info);
+}
+
+static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event)
+{
+ bool ack_event;
+
+ ack_event = true;
- } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
- pqi_acknowledge_event(ctrl_info, event);
- pqi_ofa_setup_host_buffer(ctrl_info,
- le32_to_cpu(event->ofa_bytes_requested));
- pqi_ofa_host_memory_update(ctrl_info);
- } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
- pqi_ofa_free_host_buffer(ctrl_info);
- pqi_acknowledge_event(ctrl_info, event);
+ switch (event->event_id) {
+ case PQI_EVENT_OFA_MEMORY_ALLOCATION:
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "received Online Firmware Activation memory allocation request\n");
+ schedule_work(&ctrl_info->ofa_memory_alloc_work);
+ break;
+ case PQI_EVENT_OFA_QUIESCE:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "received Online Firmware Activation quiesce request\n");
+ schedule_work(&ctrl_info->ofa_quiesce_work);
+ ack_event = false;
+ break;
+ case PQI_EVENT_OFA_CANCELED:
+ dev_info(&ctrl_info->pci_dev->dev,
+ "received Online Firmware Activation cancel request: reason: %u\n",
+ ctrl_info->ofa_cancel_reason);
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
+ pqi_ctrl_ofa_done(ctrl_info);
+ break;
+ default:
+ dev_err(&ctrl_info->pci_dev->dev,
+ "received unknown Online Firmware Activation request: event ID: %u\n",
+ event->event_id);
+ break;
}
- mutex_unlock(&ctrl_info->ofa_mutex);
+ return ack_event;
+}
+
+static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
+ device->rescan = true;
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
+static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->raid_bypass_enabled)
+ device->raid_bypass_enabled = false;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
+ bool rescan_needed;
struct pqi_ctrl_info *ctrl_info;
struct pqi_event *event;
+ bool ack_event;
ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
pqi_ctrl_busy(ctrl_info);
- pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+ pqi_wait_if_ctrl_blocked(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
goto out;
- pqi_schedule_rescan_worker_delayed(ctrl_info);
-
+ rescan_needed = false;
event = ctrl_info->events;
for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (event->pending) {
event->pending = false;
if (event->event_type == PQI_EVENT_TYPE_OFA) {
- pqi_ctrl_unbusy(ctrl_info);
- pqi_ofa_process_event(ctrl_info, event);
- return;
+ ack_event = pqi_ofa_process_event(ctrl_info, event);
+ } else {
+ ack_event = true;
+ rescan_needed = true;
+ if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
+ pqi_mark_volumes_for_rescan(ctrl_info);
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
}
- pqi_acknowledge_event(ctrl_info, event);
+ if (ack_event)
+ pqi_acknowledge_event(ctrl_info, event);
}
event++;
}
+#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
+
+ if (rescan_needed)
+ pqi_schedule_rescan_worker_with_delay(ctrl_info,
+ PQI_RESCAN_WORK_FOR_EVENT_DELAY);
+
out:
pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
int num_interrupts;
u32 heartbeat_count;
- struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
- heartbeat_timer);
+ struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t,
+ heartbeat_timer);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -3222,7 +3841,7 @@ static void pqi_heartbeat_timer_handler(struct timer_list *t)
dev_err(&ctrl_info->pci_dev->dev,
"no heartbeat detected - last heartbeat count: %u\n",
heartbeat_count);
- pqi_take_ctrl_offline(ctrl_info);
+ pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
return;
}
} else {
@@ -3251,46 +3870,27 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
- del_timer_sync(&ctrl_info->heartbeat_timer);
+ timer_delete_sync(&ctrl_info->heartbeat_timer);
}
-static inline int pqi_event_type_to_event_index(unsigned int event_type)
+static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_event *event, struct pqi_event_response *response)
{
- int index;
-
- for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
- if (event_type == pqi_supported_event_types[index])
- return index;
-
- return -1;
-}
-
-static inline bool pqi_is_supported_event(unsigned int event_type)
-{
- return pqi_event_type_to_event_index(event_type) != -1;
-}
-
-static void pqi_ofa_capture_event_payload(struct pqi_event *event,
- struct pqi_event_response *response)
-{
- u16 event_id;
-
- event_id = get_unaligned_le16(&event->event_id);
-
- if (event->event_type == PQI_EVENT_TYPE_OFA) {
- if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
- event->ofa_bytes_requested =
- response->data.ofa_memory_allocation.bytes_requested;
- } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
- event->ofa_cancel_reason =
- response->data.ofa_cancelled.reason;
- }
+ switch (event->event_id) {
+ case PQI_EVENT_OFA_MEMORY_ALLOCATION:
+ ctrl_info->ofa_bytes_requested =
+ get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
+ break;
+ case PQI_EVENT_OFA_CANCELED:
+ ctrl_info->ofa_cancel_reason =
+ get_unaligned_le16(&response->data.ofa_cancelled.reason);
+ break;
}
}
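An aside on the helpers used above: the OFA event payload arrives as a packed response from the controller, so multi-byte fields are read with get_unaligned_le16()/get_unaligned_le32() instead of plain loads. A minimal sketch of that idiom follows; the example_* layout is hypothetical and not part of this driver.

#include <linux/types.h>
#include <linux/unaligned.h>

/* Hypothetical packed firmware response layout, for illustration only. */
struct example_fw_response {
	u8 opcode;
	u8 payload[6];		/* unaligned little-endian u16 followed by u32 */
} __packed;

static u32 example_read_payload(const struct example_fw_response *rsp, u16 *id)
{
	/* get_unaligned_le*() tolerates any alignment of the source bytes. */
	*id = get_unaligned_le16(&rsp->payload[0]);
	return get_unaligned_le32(&rsp->payload[2]);
}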
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int num_events;
+ int num_events;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_event_queue *event_queue;
@@ -3304,26 +3904,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
while (1) {
oq_pi = readl(event_queue->oq_pi);
+ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
+ pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
+ return -1;
+ }
+
if (oq_pi == oq_ci)
break;
num_events++;
- response = event_queue->oq_element_array +
- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
-
- event_index =
- pqi_event_type_to_event_index(response->event_type);
-
- if (event_index >= 0) {
- if (response->request_acknowlege) {
- event = &ctrl_info->events[event_index];
- event->pending = true;
- event->event_type = response->event_type;
- event->event_id = response->event_id;
- event->additional_event_id =
- response->additional_event_id;
- pqi_ofa_capture_event_payload(event, response);
- }
+ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+
+ event_index = pqi_event_type_to_event_index(response->event_type);
+
+ if (event_index >= 0 && response->request_acknowledge) {
+ event = &ctrl_info->events[event_index];
+ event->pending = true;
+ event->event_type = response->event_type;
+ event->event_id = get_unaligned_le16(&response->event_id);
+ event->additional_event_id =
+ get_unaligned_le32(&response->additional_event_id);
+ if (event->event_type == PQI_EVENT_TYPE_OFA)
+ pqi_ofa_capture_event_payload(ctrl_info, event, response);
}
oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3340,8 +3945,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
-static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3418,8 +4022,7 @@ static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
valid_irq = true;
break;
case IRQ_MODE_INTX:
- intx_status =
- readl(&ctrl_info->pqi_registers->legacy_intx_status);
+ intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
if (intx_status & PQI_LEGACY_INTX_PENDING)
valid_irq = true;
else
@@ -3438,7 +4041,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_queue_group *queue_group;
- unsigned int num_responses_handled;
+ int num_io_responses_handled;
+ int num_events_handled;
queue_group = data;
ctrl_info = queue_group->ctrl_info;
@@ -3446,17 +4050,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
if (!pqi_is_valid_irq(ctrl_info))
return IRQ_NONE;
- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ if (num_io_responses_handled < 0)
+ goto out;
- if (irq == ctrl_info->event_irq)
- num_responses_handled += pqi_process_event_intr(ctrl_info);
+ if (irq == ctrl_info->event_irq) {
+ num_events_handled = pqi_process_event_intr(ctrl_info);
+ if (num_events_handled < 0)
+ goto out;
+ } else {
+ num_events_handled = 0;
+ }
- if (num_responses_handled)
+ if (num_io_responses_handled + num_events_handled > 0)
atomic_inc(&ctrl_info->num_interrupts);
pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+out:
return IRQ_HANDLED;
}
@@ -3497,10 +4109,14 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
int num_vectors_enabled;
+ unsigned int flags = PCI_IRQ_MSIX;
+
+ if (!pqi_disable_managed_interrupts)
+ flags |= PCI_IRQ_AFFINITY;
num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ flags);
if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
"MSI-X init failed with error %d\n",
@@ -3731,7 +4347,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
&admin_queues_aligned->iq_element_array;
admin_queues->oq_element_array =
&admin_queues_aligned->oq_element_array;
- admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+ admin_queues->iq_ci =
+ (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
admin_queues->oq_pi =
(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
@@ -3745,8 +4362,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
ctrl_info->admin_queue_memory_base);
admin_queues->iq_ci_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
- ((void *)admin_queues->iq_ci -
- ctrl_info->admin_queue_memory_base);
+ ((void __iomem *)admin_queues->iq_ci -
+ (void __iomem *)ctrl_info->admin_queue_memory_base);
admin_queues->oq_pi_bus_addr =
ctrl_info->admin_queue_memory_base_dma_handle +
((void __iomem *)admin_queues->oq_pi -
@@ -3755,7 +4372,7 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3779,20 +4396,21 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
&pqi_registers->admin_oq_pi_addr);
reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
- (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+ (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
+
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
&pqi_registers->function_and_status_code);
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
while (1) {
+ msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
status = readb(&pqi_registers->function_and_status_code);
if (status == PQI_STATUS_IDLE)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
}
/*
@@ -3848,7 +4466,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
oq_pi = readl(admin_queues->oq_pi);
@@ -3963,7 +4581,7 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
rc = 0;
break;
}
@@ -3986,8 +4604,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
- *error_info)
+static int pqi_process_raid_io_error_synchronous(
+ struct pqi_raid_error_info *error_info)
{
int rc = -EIO;
@@ -4009,56 +4627,41 @@ static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
return rc;
}
+static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
+{
+ return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
+}
+
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
struct pqi_iu_header *request, unsigned int flags,
- struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+ struct pqi_raid_error_info *error_info)
{
int rc = 0;
struct pqi_io_request *io_request;
- unsigned long start_jiffies;
- unsigned long msecs_blocked;
size_t iu_length;
DECLARE_COMPLETION_ONSTACK(wait);
- /*
- * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
- * are mutually exclusive.
- */
-
if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
if (down_interruptible(&ctrl_info->sync_request_sem))
return -ERESTARTSYS;
} else {
- if (timeout_msecs == NO_TIMEOUT) {
- down(&ctrl_info->sync_request_sem);
- } else {
- start_jiffies = jiffies;
- if (down_timeout(&ctrl_info->sync_request_sem,
- msecs_to_jiffies(timeout_msecs)))
- return -ETIMEDOUT;
- msecs_blocked =
- jiffies_to_msecs(jiffies - start_jiffies);
- if (msecs_blocked >= timeout_msecs)
- return -ETIMEDOUT;
- timeout_msecs -= msecs_blocked;
- }
+ down(&ctrl_info->sync_request_sem);
}
pqi_ctrl_busy(ctrl_info);
- timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
- if (timeout_msecs == 0) {
- pqi_ctrl_unbusy(ctrl_info);
- rc = -ETIMEDOUT;
- goto out;
- }
+ /*
+	 * Wait for other admin queue updates such as:
+	 * config table changes, OFA memory updates, ...
+ */
+ if (pqi_is_blockable_request(request))
+ pqi_wait_if_ctrl_blocked(ctrl_info);
if (pqi_ctrl_offline(ctrl_info)) {
- pqi_ctrl_unbusy(ctrl_info);
rc = -ENXIO;
goto out;
}
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, NULL);
put_unaligned_le16(io_request->index,
&(((struct pqi_raid_path_request *)request)->request_id));
@@ -4074,37 +4677,24 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
io_request->io_complete_callback = pqi_raid_synchronous_complete;
io_request->context = &wait;
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- pqi_ctrl_unbusy(ctrl_info);
-
- if (timeout_msecs == NO_TIMEOUT) {
- pqi_wait_for_completion_io(ctrl_info, &wait);
- } else {
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(timeout_msecs))) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "command timed out\n");
- rc = -ETIMEDOUT;
- }
- }
+ pqi_wait_for_completion_io(ctrl_info, &wait);
if (error_info) {
if (io_request->error_info)
- memcpy(error_info, io_request->error_info,
- sizeof(*error_info));
+ memcpy(error_info, io_request->error_info, sizeof(*error_info));
else
memset(error_info, 0, sizeof(*error_info));
} else if (rc == 0 && io_request->error_info) {
- rc = pqi_process_raid_io_error_synchronous(
- io_request->error_info);
+ rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
}
pqi_free_io_request(io_request);
out:
+ pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
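The synchronous RAID path above follows the usual on-stack completion pattern: the submitter installs a callback that calls complete(), hands the request to the queue, then sleeps until the IU is retired. A stripped-down sketch of that pattern, using placeholder example_* types rather than the driver's request structures:

#include <linux/completion.h>

struct example_request {
	void (*done)(struct example_request *rq, void *ctx);
	void *ctx;
};

/* Completion callback: wake whoever is sleeping on the on-stack completion. */
static void example_request_done(struct example_request *rq, void *ctx)
{
	complete(ctx);
}

/* Submit @rq via @fire and sleep until the completion callback has run. */
static int example_submit_sync(struct example_request *rq,
	void (*fire)(struct example_request *rq))
{
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->done = example_request_done;
	rq->ctx = &wait;
	fire(rq);
	wait_for_completion_io(&wait);
	return 0;
}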
@@ -4141,8 +4731,7 @@ static int pqi_submit_admin_request_synchronous(
rc = pqi_poll_for_admin_response(ctrl_info, response);
if (rc == 0)
- rc = pqi_validate_admin_response(response,
- request->function_code);
+ rc = pqi_validate_admin_response(response, request->function_code);
return rc;
}
@@ -4176,8 +4765,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
- &response);
+ rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
pqi_pci_unmap(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor, 1,
@@ -4479,8 +5067,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
}
#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
- (offsetof(struct pqi_event_config, descriptors) + \
- (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+ struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
bool enable_events)
@@ -4512,8 +5099,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
@@ -4526,7 +5112,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
event_descriptor = &event_config->descriptors[i];
if (enable_events &&
pqi_is_supported_event(event_descriptor->event_type))
- put_unaligned_le16(ctrl_info->event_queue.oq_id,
+ put_unaligned_le16(ctrl_info->event_queue.oq_id,
&event_descriptor->oq_id);
else
put_unaligned_le16(0, &event_descriptor->oq_id);
@@ -4548,8 +5134,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
- NULL, NO_TIMEOUT);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
@@ -4566,11 +5151,6 @@ static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
return pqi_configure_events(ctrl_info, true);
}
-static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
-{
- return pqi_configure_events(ctrl_info, false);
-}
-
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4602,10 +5182,9 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle,
- GFP_KERNEL);
-
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -4621,9 +5200,8 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
struct device *dev;
struct pqi_io_request *io_request;
- ctrl_info->io_request_pool =
- kcalloc(ctrl_info->max_io_slots,
- sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+ ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
+ sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
if (!ctrl_info->io_request_pool) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -4636,8 +5214,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
io_request = ctrl_info->io_request_pool;
for (i = 0; i < ctrl_info->max_io_slots; i++) {
- io_request->iu =
- kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+ io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
if (!io_request->iu) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -4657,8 +5234,7 @@ static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
io_request->index = i;
io_request->sg_chain_buffer = sg_chain_buffer;
- io_request->sg_chain_buffer_dma_handle =
- sg_chain_buffer_dma_handle;
+ io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
io_request++;
}
@@ -4687,7 +5263,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
ctrl_info->error_buffer_length =
ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
- if (reset_devices)
+ if (is_kdump_kernel())
max_transfer_size = min(ctrl_info->max_transfer_size,
PQI_MAX_TRANSFER_SIZE_KDUMP);
else
@@ -4716,23 +5292,21 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
u16 num_elements_per_iq;
u16 num_elements_per_oq;
- if (reset_devices) {
+ if (is_kdump_kernel()) {
num_queue_groups = 1;
} else {
- int num_cpus;
int max_queue_groups;
max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
ctrl_info->max_outbound_queues - 1);
max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
- num_cpus = num_online_cpus();
- num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+ num_queue_groups =
+ blk_mq_num_online_queues(ctrl_info->max_msix_vectors);
num_queue_groups = min(num_queue_groups, max_queue_groups);
}
ctrl_info->num_queue_groups = num_queue_groups;
- ctrl_info->max_hw_queue_index = num_queue_groups - 1;
/*
* Make sure that the max. inbound IU length is an even multiple
@@ -4765,10 +5339,16 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
sizeof(struct pqi_sg_descriptor)) +
PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+
+ ctrl_info->max_sg_per_r56_iu =
+ ((ctrl_info->max_inbound_iu_length -
+ PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+ sizeof(struct pqi_sg_descriptor)) +
+ PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
}
-static inline void pqi_set_sg_descriptor(
- struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
+ struct scatterlist *sg)
{
u64 address = (u64)sg_dma_address(sg);
unsigned int length = sg_dma_len(sg);
@@ -4778,16 +5358,52 @@ static inline void pqi_set_sg_descriptor(
put_unaligned_le32(0, &sg_descriptor->flags);
}
+static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
+ struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
+ int max_sg_per_iu, bool *chained)
+{
+ int i;
+ unsigned int num_sg_in_iu;
+
+ *chained = false;
+ i = 0;
+ num_sg_in_iu = 0;
+ max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
+
+ while (1) {
+ pqi_set_sg_descriptor(sg_descriptor, sg);
+ if (!*chained)
+ num_sg_in_iu++;
+ i++;
+ if (i == sg_count)
+ break;
+ sg_descriptor++;
+ if (i == max_sg_per_iu) {
+ put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
+ &sg_descriptor->address);
+ put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
+ &sg_descriptor->length);
+ put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
+ *chained = true;
+ num_sg_in_iu++;
+ sg_descriptor = io_request->sg_chain_buffer;
+ }
+ sg = sg_next(sg);
+ }
+
+ put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+ return num_sg_in_iu;
+}
+
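Note the descriptor accounting in pqi_build_sg_list(): one embedded slot is always reserved for the CISS_SG_CHAIN marker, so only max_sg_per_iu - 1 data descriptors fit in the IU before the list spills into the chain buffer. A small standalone helper (example_* names, not driver logic) that reproduces that arithmetic:

#include <linux/types.h>

/*
 * How many descriptors remain embedded in the IU once one slot is reserved
 * for the CISS_SG_CHAIN marker. E.g. sg_count = 40 with max_sg_per_iu = 16
 * keeps 15 data descriptors plus the chain descriptor in the IU and places
 * the remaining 25 entries in the chain buffer.
 */
static unsigned int example_embedded_sg_count(unsigned int sg_count,
	unsigned int max_sg_per_iu, bool *chained)
{
	unsigned int data_slots = max_sg_per_iu - 1;	/* one slot kept for the chain entry */

	*chained = sg_count > data_slots;
	if (!*chained)
		return sg_count;	/* the whole list fits in the IU */

	return data_slots + 1;		/* embedded data descriptors + the chain descriptor */
}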
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
- int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
- unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@@ -4803,41 +5419,89 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
- max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
- chained = false;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
+
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+
+ return 0;
+}
+
+static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
num_sg_in_iu = 0;
- i = 0;
- while (1) {
- pqi_set_sg_descriptor(sg_descriptor, sg);
- if (!chained)
- num_sg_in_iu++;
- i++;
- if (i == sg_count)
- break;
- sg_descriptor++;
- if (i == max_sg_per_iu) {
- put_unaligned_le64(
- (u64)io_request->sg_chain_buffer_dma_handle,
- &sg_descriptor->address);
- put_unaligned_le32((sg_count - num_sg_in_iu)
- * sizeof(*sg_descriptor),
- &sg_descriptor->length);
- put_unaligned_le32(CISS_SG_CHAIN,
- &sg_descriptor->flags);
- chained = true;
- num_sg_in_iu++;
- sg_descriptor = io_request->sg_chain_buffer;
- }
- sg = sg_next(sg);
- }
+ if (sg_count == 0)
+ goto out;
+
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
- put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
out:
put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
+
+ return 0;
+}
+
+static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
+ struct pqi_io_request *io_request)
+{
+ u16 iu_length;
+ int sg_count;
+ bool chained;
+ unsigned int num_sg_in_iu;
+ struct scatterlist *sg;
+ struct pqi_sg_descriptor *sg_descriptor;
+
+ sg_count = scsi_dma_map(scmd);
+ if (sg_count < 0)
+ return sg_count;
+
+ iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
+ PQI_REQUEST_HEADER_LENGTH;
+ num_sg_in_iu = 0;
+
+ if (sg_count != 0) {
+ sg = scsi_sglist(scmd);
+ sg_descriptor = request->sg_descriptors;
+
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_r56_iu, &chained);
+
+ request->partial = chained;
+ iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+ }
+
+ put_unaligned_le16(iu_length, &request->header.iu_length);
+ request->num_sg_descriptors = num_sg_in_iu;
return 0;
}
@@ -4846,12 +5510,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
- int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
- unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@@ -4868,35 +5530,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
- max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
- chained = false;
- i = 0;
- while (1) {
- pqi_set_sg_descriptor(sg_descriptor, sg);
- if (!chained)
- num_sg_in_iu++;
- i++;
- if (i == sg_count)
- break;
- sg_descriptor++;
- if (i == max_sg_per_iu) {
- put_unaligned_le64(
- (u64)io_request->sg_chain_buffer_dma_handle,
- &sg_descriptor->address);
- put_unaligned_le32((sg_count - num_sg_in_iu)
- * sizeof(*sg_descriptor),
- &sg_descriptor->length);
- put_unaligned_le32(CISS_SG_CHAIN,
- &sg_descriptor->flags);
- chained = true;
- num_sg_in_iu++;
- sg_descriptor = io_request->sg_chain_buffer;
- }
- sg = sg_next(sg);
- }
+ num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+ ctrl_info->max_sg_per_iu, &chained);
- put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
@@ -4918,29 +5555,44 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static int pqi_raid_submit_scsi_cmd_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+/*
+ * Adjust the timeout value for physical devices sent to the firmware
+ * by subtracting 3 seconds for timeouts greater than or equal to 8 seconds.
+ *
+ * This provides the firmware with additional time to attempt early recovery
+ * before the OS-level timeout occurs.
+ */
+#define ADJUST_SECS_TIMEOUT_VALUE(tv) (((tv) >= 8) ? ((tv) - 3) : (tv))
+
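For clarity, the macro only trims timeouts of 8 seconds or more; shorter OS timeouts are forwarded unchanged. A tiny equivalent helper, shown only to make the boundary behaviour explicit:

#include <linux/types.h>

/* Same arithmetic as ADJUST_SECS_TIMEOUT_VALUE(): 30s -> 27s, 8s -> 5s, 7s -> 7s. */
static u32 example_adjust_fw_timeout(u32 os_timeout_secs)
{
	return os_timeout_secs >= 8 ? os_timeout_secs - 3 : os_timeout_secs;
}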
+static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+ struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
+ u32 timeout;
size_t cdb_length;
+ struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ struct request *rq;
+
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd;
request = io_request->iu;
- memset(request, 0,
- offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
- memcpy(request->lun_number, device->scsi3addr,
- sizeof(request->lun_number));
+ memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
+ request->ml_device_lun_number = (u8)scmd->device->lun;
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -4950,38 +5602,28 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
case 10:
case 12:
case 16:
- /* No bytes in the Additional CDB bytes field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_0;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
break;
case 20:
- /* 4 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_4;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
break;
case 24:
- /* 8 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_8;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
break;
case 28:
- /* 12 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_12;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
break;
case 32:
default:
- /* 16 bytes in the Additional cdb field */
- request->additional_cdb_bytes_usage =
- SOP_ADDITIONAL_CDB_BYTES_16;
+ request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
break;
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
@@ -5003,6 +5645,12 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
return SCSI_MLQUEUE_HOST_BUSY;
}
+ if (device->is_physical_device) {
+ rq = scsi_cmd_to_rq(scmd);
+ timeout = rq->timeout / HZ;
+ put_unaligned_le32(ADJUST_SECS_TIMEOUT_VALUE(timeout), &request->timeout);
+ }
+
pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
return 0;
@@ -5012,18 +5660,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
- struct pqi_io_request *io_request;
+ bool io_high_prio;
- io_request = pqi_alloc_io_request(ctrl_info);
-
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
-}
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
-static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
-{
- if (!pqi_ctrl_blocked(ctrl_info))
- schedule_work(&ctrl_info->raid_bypass_retry_work);
+ return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
@@ -5042,7 +5683,7 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
return false;
device = scmd->device->hostdata;
- if (pqi_device_offline(device))
+ if (pqi_device_offline(device) || pqi_device_in_remove(device))
return false;
ctrl_info = shost_to_hba(scmd->device->host);
@@ -5052,132 +5693,6 @@ static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
return true;
}
-static inline void pqi_add_to_raid_bypass_retry_list(
- struct pqi_ctrl_info *ctrl_info,
- struct pqi_io_request *io_request, bool at_head)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- if (at_head)
- list_add(&io_request->request_list_entry,
- &ctrl_info->raid_bypass_retry_list);
- else
- list_add_tail(&io_request->request_list_entry,
- &ctrl_info->raid_bypass_retry_list);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
-static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
- void *context)
-{
- struct scsi_cmnd *scmd;
-
- scmd = io_request->scmd;
- pqi_free_io_request(io_request);
- pqi_scsi_done(scmd);
-}
-
-static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
-{
- struct scsi_cmnd *scmd;
- struct pqi_ctrl_info *ctrl_info;
-
- io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
- scmd = io_request->scmd;
- scmd->result = 0;
- ctrl_info = shost_to_hba(scmd->device->host);
-
- pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
- pqi_schedule_bypass_retry(ctrl_info);
-}
-
-static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
-{
- struct scsi_cmnd *scmd;
- struct pqi_scsi_dev *device;
- struct pqi_ctrl_info *ctrl_info;
- struct pqi_queue_group *queue_group;
-
- scmd = io_request->scmd;
- device = scmd->device->hostdata;
- if (pqi_device_in_reset(device)) {
- pqi_free_io_request(io_request);
- set_host_byte(scmd, DID_RESET);
- pqi_scsi_done(scmd);
- return 0;
- }
-
- ctrl_info = shost_to_hba(scmd->device->host);
- queue_group = io_request->queue_group;
-
- pqi_reinit_io_request(io_request);
-
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
-}
-
-static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
- struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_io_request *io_request;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- io_request = list_first_entry_or_null(
- &ctrl_info->raid_bypass_retry_list,
- struct pqi_io_request, request_list_entry);
- if (io_request)
- list_del(&io_request->request_list_entry);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-
- return io_request;
-}
-
-static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
-{
- int rc;
- struct pqi_io_request *io_request;
-
- pqi_ctrl_busy(ctrl_info);
-
- while (1) {
- if (pqi_ctrl_blocked(ctrl_info))
- break;
- io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
- if (!io_request)
- break;
- rc = pqi_retry_raid_bypass(io_request);
- if (rc) {
- pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
- true);
- pqi_schedule_bypass_retry(ctrl_info);
- break;
- }
- }
-
- pqi_ctrl_unbusy(ctrl_info);
-}
-
-static void pqi_raid_bypass_retry_worker(struct work_struct *work)
-{
- struct pqi_ctrl_info *ctrl_info;
-
- ctrl_info = container_of(work, struct pqi_ctrl_info,
- raid_bypass_retry_work);
- pqi_retry_raid_bypass_requests(ctrl_info);
-}
-
-static void pqi_clear_all_queued_raid_bypass_retries(
- struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
- INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
- spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
-}
-
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5185,12 +5700,11 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
scmd = io_request->scmd;
scsi_dma_unmap(scmd);
- if (io_request->status == -EAGAIN)
+ if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
set_host_byte(scmd, DID_IMM_RETRY);
- else if (pqi_raid_bypass_retry_needed(io_request)) {
- pqi_queue_raid_bypass_retry(io_request);
- return;
+ pqi_cmd_priv(scmd)->this_residual++;
}
+
pqi_free_io_request(io_request);
pqi_scsi_done(scmd);
}
@@ -5199,34 +5713,45 @@ static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
+ bool io_high_prio;
+
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
+
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
- scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL,
+ false, io_high_prio);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass)
+ struct pqi_encryption_info *encryption_info, bool raid_bypass,
+ bool io_high_prio)
{
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
request = io_request->iu;
- memset(request, 0,
- offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
put_unaligned_le32(aio_handle, &request->nexus_id);
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ if (!raid_bypass && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5273,16 +5798,149 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_r1_path_request *r1_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+ io_request->raid_bypass = true;
+
+ r1_request = io_request->iu;
+ memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
+
+ r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
+ put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
+ r1_request->num_drives = rmd->num_it_nexus_entries;
+ put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
+ put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
+ if (rmd->num_it_nexus_entries == 3)
+ put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
+
+ put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
+ r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le16(io_request->index, &r1_request->request_id);
+ r1_request->error_index = r1_request->request_id;
+ if (rmd->cdb_length > sizeof(r1_request->cdb))
+ rmd->cdb_length = sizeof(r1_request->cdb);
+ r1_request->cdb_length = rmd->cdb_length;
+ memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
+
+ /* The direction is always write. */
+ r1_request->data_direction = SOP_READ_FLAG;
+
+ if (encryption_info) {
+ r1_request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &r1_request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &r1_request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &r1_request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+ struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+ struct pqi_scsi_dev_raid_map_data *rmd)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+ struct pqi_aio_r56_path_request *r56_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ io_request->io_complete_callback = pqi_aio_io_complete;
+ io_request->scmd = scmd;
+ io_request->raid_bypass = true;
+
+ r56_request = io_request->iu;
+ memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
+
+ if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
+ r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
+ else
+ r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
+
+ put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
+ put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
+ put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
+ if (rmd->raid_level == SA_RAID_6) {
+ put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
+ r56_request->xor_multiplier = rmd->xor_mult;
+ }
+ put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
+ r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ put_unaligned_le64(rmd->row, &r56_request->row);
+
+ put_unaligned_le16(io_request->index, &r56_request->request_id);
+ r56_request->error_index = r56_request->request_id;
+
+ if (rmd->cdb_length > sizeof(r56_request->cdb))
+ rmd->cdb_length = sizeof(r56_request->cdb);
+ r56_request->cdb_length = rmd->cdb_length;
+ memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
+
+ /* The direction is always write. */
+ r56_request->data_direction = SOP_READ_FLAG;
+
+ if (encryption_info) {
+ r56_request->encryption_enable = true;
+ put_unaligned_le16(encryption_info->data_encryption_key_index,
+ &r56_request->data_encryption_key_index);
+ put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+ &r56_request->encrypt_tweak_lower);
+ put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+ &r56_request->encrypt_tweak_upper);
+ }
+
+ rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
+ if (rc) {
+ pqi_free_io_request(io_request);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+ return 0;
+}
+
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
- u16 hw_queue;
+ /*
+ * We are setting host_tagset = 1 during init.
+ */
+ return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
+}
- hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
- if (hw_queue > ctrl_info->max_hw_queue_index)
- hw_queue = 0;
+static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
+{
+ if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
+ return false;
- return hw_queue;
+ return pqi_cmd_priv(scmd)->this_residual == 0;
}
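Because the host now uses a shared (host-wide) tag set, the block layer's unique tag already identifies the hardware queue, which is why the old clamp against max_hw_queue_index could be dropped. A minimal sketch of how the unique tag decomposes, using only blk-mq helpers (the example_* wrapper is illustrative):

#include <linux/blk-mq.h>

/*
 * The unique tag packs the hardware queue index into its upper 16 bits;
 * the two helpers below recover the queue index and the per-queue tag.
 */
static void example_decode_unique_tag(struct request *rq, u16 *hwq, u16 *tag)
{
	u32 unique = blk_mq_unique_tag(rq);

	*hwq = blk_mq_unique_tag_to_hwq(unique);
	*tag = blk_mq_unique_tag_to_tag(unique);
}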
/*
@@ -5293,6 +5951,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
struct pqi_scsi_dev *device;
+ struct completion *wait;
if (!scmd->device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5305,21 +5964,102 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
return;
}
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
+
+ wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
+ if (wait != PQI_NO_COMPLETION)
+ complete(wait);
}
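The host_scribble handling above is a one-shot handoff: the field holds the PQI_NO_COMPLETION sentinel, or a completion pointer that a waiter can install, and xchg() lets exactly one path consume it. A compact sketch of that idiom with simplified, hypothetical types:

#include <linux/atomic.h>
#include <linux/completion.h>

#define EXAMPLE_NO_WAITER	((void *)-1)	/* stand-in for PQI_NO_COMPLETION */

struct example_cmd {
	void *scribble;		/* sentinel, a struct completion *, or NULL once consumed */
};

/* Completion side: claim the slot exactly once and wake a waiter if one exists. */
static void example_complete_cmd(struct example_cmd *cmd)
{
	void *old = xchg(&cmd->scribble, NULL);

	if (old && old != EXAMPLE_NO_WAITER)
		complete(old);
}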
-static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
+ u32 oldest_jiffies;
+ u8 lru_index;
+ int i;
+ int rc;
+ struct pqi_scsi_dev *device;
+ struct pqi_stream_data *pqi_stream_data;
+ struct pqi_scsi_dev_raid_map_data rmd = { 0 };
+
+ if (!ctrl_info->enable_stream_detection)
+ return false;
+
+ rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+ if (rc)
+ return false;
+
+ /* Check writes only. */
+ if (!rmd.is_write)
+ return false;
+
+ device = scmd->device->hostdata;
+
+ /* Check for RAID 5/6 streams. */
+ if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
+ return false;
+
+ /*
+ * If controller does not support AIO RAID{5,6} writes, need to send
+ * requests down non-AIO path.
+ */
+ if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
+ (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
+ return true;
+
+ lru_index = 0;
+ oldest_jiffies = INT_MAX;
+ for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+ pqi_stream_data = &device->stream_data[i];
+ /*
+ * Check for adjacent request or request is within
+		 * Check whether this request is adjacent to, or falls within,
+		 * the request previously tracked in this stream slot.
+ if ((pqi_stream_data->next_lba &&
+ rmd.first_block >= pqi_stream_data->next_lba) &&
+ rmd.first_block <= pqi_stream_data->next_lba +
+ rmd.block_cnt) {
+ pqi_stream_data->next_lba = rmd.first_block +
+ rmd.block_cnt;
+ pqi_stream_data->last_accessed = jiffies;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
+ return true;
+ }
+
+ /* unused entry */
+ if (pqi_stream_data->last_accessed == 0) {
+ lru_index = i;
+ break;
+ }
+
+ /* Find entry with oldest last accessed time. */
+ if (pqi_stream_data->last_accessed <= oldest_jiffies) {
+ oldest_jiffies = pqi_stream_data->last_accessed;
+ lru_index = i;
+ }
+ }
+
+ /* Set LRU entry. */
+ pqi_stream_data = &device->stream_data[lru_index];
+ pqi_stream_data->last_accessed = jiffies;
+ pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
+
+ return false;
+}
+
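The stream detector keeps a small per-device table of expected next LBAs and treats a write as sequential when it lands at or just behind one of them; otherwise the least recently used slot is recycled for the new stream. A self-contained sketch of the same bookkeeping (fixed-size table and example_* names are illustrative, not the driver's):

#include <linux/jiffies.h>
#include <linux/types.h>

#define EXAMPLE_NUM_STREAMS	8

struct example_stream {
	u64 next_lba;
	unsigned long last_accessed;	/* jiffies; 0 means the slot is unused */
};

/* Returns true when the write continues a tracked sequential stream. */
static bool example_is_sequential_write(struct example_stream *streams,
	u64 first_lba, u32 block_cnt)
{
	unsigned long oldest = ~0UL;
	unsigned int lru = 0;
	unsigned int i;

	for (i = 0; i < EXAMPLE_NUM_STREAMS; i++) {
		struct example_stream *s = &streams[i];

		/* Adjacent to, or overlapping the tail of, the tracked stream. */
		if (s->next_lba && first_lba >= s->next_lba &&
		    first_lba <= s->next_lba + block_cnt) {
			s->next_lba = first_lba + block_cnt;
			s->last_accessed = jiffies;
			return true;
		}
		if (s->last_accessed == 0) {		/* free slot */
			lru = i;
			break;
		}
		if (s->last_accessed <= oldest) {	/* oldest slot so far */
			oldest = s->last_accessed;
			lru = i;
		}
	}

	/* No match: recycle the LRU slot and start tracking a new stream. */
	streams[lru].next_lba = first_lba + block_cnt;
	streams[lru].last_accessed = jiffies;
	return false;
}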
+static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
int rc;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
+ u8 lun;
+
+ scmd->host_scribble = PQI_NO_COMPLETION;
device = scmd->device->hostdata;
- ctrl_info = shost_to_hba(shost);
if (!device) {
set_host_byte(scmd, DID_NO_CONNECT);
@@ -5327,18 +6067,19 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding);
+ lun = (u8)scmd->device->lun;
- if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
- device)) {
+ atomic_inc(&device->scsi_cmds_outstanding[lun]);
+
+ ctrl_info = shost_to_hba(shost);
+
+ if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
}
- pqi_ctrl_busy(ctrl_info);
- if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info)) {
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5355,95 +6096,122 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
- rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
- scmd, queue_group);
- if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
+ pqi_is_bypass_eligible_request(scmd) &&
+ !pqi_is_parity_write_stream(ctrl_info, scmd)) {
+ rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
+ }
}
if (!raid_bypassed)
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
} else {
if (device->aio_enabled)
- rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
else
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
}
out:
- pqi_ctrl_unbusy(ctrl_info);
- if (rc)
- atomic_dec(&device->scsi_cmds_outstanding);
+ if (rc) {
+ scmd->host_scribble = NULL;
+ atomic_dec(&device->scsi_cmds_outstanding[lun]);
+ }
return rc;
}
-static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
+static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
{
+ unsigned int i;
unsigned int path;
unsigned long flags;
- bool list_is_empty;
+ unsigned int queued_io_count;
+ struct pqi_queue_group *queue_group;
+ struct pqi_io_request *io_request;
- for (path = 0; path < 2; path++) {
- while (1) {
- spin_lock_irqsave(
- &queue_group->submit_lock[path], flags);
- list_is_empty =
- list_empty(&queue_group->request_list[path]);
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
- if (list_is_empty)
- break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
+ queued_io_count = 0;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(&queue_group->submit_lock[path], flags);
+ list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
+ queued_io_count++;
+ spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
}
- return 0;
+ return queued_io_count;
}
-static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
{
- int rc;
unsigned int i;
unsigned int path;
+ unsigned int nonempty_inbound_queue_count;
struct pqi_queue_group *queue_group;
pqi_index_t iq_pi;
pqi_index_t iq_ci;
+ nonempty_inbound_queue_count = 0;
+
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
queue_group = &ctrl_info->queue_groups[i];
-
- rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
- if (rc)
- return rc;
-
for (path = 0; path < 2; path++) {
iq_pi = queue_group->iq_pi_copy[path];
+ iq_ci = readl(queue_group->iq_ci[path]);
+ if (iq_ci != iq_pi)
+ nonempty_inbound_queue_count++;
+ }
+ }
- while (1) {
- iq_ci = readl(queue_group->iq_ci[path]);
- if (iq_ci == iq_pi)
- break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- usleep_range(1000, 2000);
- }
+ return nonempty_inbound_queue_count;
+}
+
+#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
+
+static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ unsigned int queued_io_count;
+ unsigned int nonempty_inbound_queue_count;
+ bool displayed_warning;
+
+ displayed_warning = false;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
+
+ while (1) {
+ queued_io_count = pqi_queued_io_count(ctrl_info);
+ nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
+ if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
+ displayed_warning = true;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
+ usleep_range(1000, 2000);
}
+ if (displayed_warning)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "queued I/O drained after waiting for %u seconds\n",
+ jiffies_to_msecs(jiffies - start_jiffies) / 1000);
+
return 0;
}
static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+ struct pqi_scsi_dev *device, u8 lun)
{
unsigned int i;
unsigned int path;
@@ -5464,16 +6232,20 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
list_for_each_entry_safe(io_request, next,
&queue_group->request_list[path],
request_list_entry) {
+
scmd = io_request->scmd;
if (!scmd)
continue;
scsi_device = scmd->device->hostdata;
- if (scsi_device != device)
- continue;
list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
+ if (scsi_device == device && (u8)scmd->device->lun == lun)
+ set_host_byte(scmd, DID_RESET);
+ else
+ set_host_byte(scmd, DID_REQUEUE);
+ pqi_free_io_request(io_request);
+ scsi_dma_unmap(scmd);
pqi_scsi_done(scmd);
}
@@ -5483,102 +6255,39 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
}
}
-static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned int i;
- unsigned int path;
- struct pqi_queue_group *queue_group;
- unsigned long flags;
- struct pqi_io_request *io_request;
- struct pqi_io_request *next;
- struct scsi_cmnd *scmd;
-
- for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- queue_group = &ctrl_info->queue_groups[i];
-
- for (path = 0; path < 2; path++) {
- spin_lock_irqsave(&queue_group->submit_lock[path],
- flags);
-
- list_for_each_entry_safe(io_request, next,
- &queue_group->request_list[path],
- request_list_entry) {
-
- scmd = io_request->scmd;
- if (!scmd)
- continue;
-
- list_del(&io_request->request_list_entry);
- set_host_byte(scmd, DID_RESET);
- pqi_scsi_done(scmd);
- }
-
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
- }
- }
-}
+#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_secs)
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{
- unsigned long timeout;
+ int cmds_outstanding;
+ unsigned long start_jiffies;
+ unsigned long warning_timeout;
+ unsigned long msecs_waiting;
- timeout = (timeout_secs * PQI_HZ) + jiffies;
+ start_jiffies = jiffies;
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
- while (atomic_read(&device->scsi_cmds_outstanding)) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
- if (timeout_secs != NO_TIMEOUT) {
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "timed out waiting for pending IO\n");
- return -ETIMEDOUT;
- }
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
+ if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
}
- usleep_range(1000, 2000);
- }
-
- return 0;
-}
-
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- unsigned long timeout_secs)
-{
- bool io_pending;
- unsigned long flags;
- unsigned long timeout;
- struct pqi_scsi_dev *device;
-
- timeout = (timeout_secs * PQI_HZ) + jiffies;
- while (1) {
- io_pending = false;
-
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (atomic_read(&device->scsi_cmds_outstanding)) {
- io_pending = true;
- break;
- }
+ msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
+ if (msecs_waiting >= timeout_msecs) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target,
+ lun, msecs_waiting / 1000, cmds_outstanding);
+ return -ETIMEDOUT;
}
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!io_pending)
- break;
-
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
-
- if (timeout_secs != NO_TIMEOUT) {
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "timed out waiting for pending IO\n");
- return -ETIMEDOUT;
- }
+ if (time_after(jiffies, warning_timeout)) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target,
+ lun, msecs_waiting / 1000, cmds_outstanding);
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
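The rewritten wait loop layers a soft, repeating warning interval on top of a hard overall timeout, both tracked in jiffies. A generic sketch of that polling pattern, independent of the driver's structures:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

/*
 * Poll @done until it reports true, emit a progress warning every
 * @warn_secs seconds, and fail with -ETIMEDOUT once @timeout_msecs elapse.
 */
static int example_poll_with_warning(bool (*done)(void *ctx), void *ctx,
	unsigned int warn_secs, unsigned long timeout_msecs)
{
	unsigned long start = jiffies;
	unsigned long warn_at = start + warn_secs * HZ;

	while (!done(ctx)) {
		unsigned long waited_ms = jiffies_to_msecs(jiffies - start);

		if (waited_ms >= timeout_msecs)
			return -ETIMEDOUT;
		if (time_after(jiffies, warn_at)) {
			pr_warn("still waiting after %lu ms\n", waited_ms);
			warn_at = jiffies + warn_secs * HZ;
		}
		usleep_range(1000, 2000);
	}

	return 0;
}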
@@ -5594,16 +6303,20 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 10
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct completion *wait)
+ struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{
int rc;
+ unsigned int wait_secs;
+ int cmds_outstanding;
+
+ wait_secs = 0;
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
rc = 0;
break;
}
@@ -5613,20 +6326,27 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
rc = -ENXIO;
break;
}
+
+ wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
}
return rc;
}
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
+
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, NULL);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -5639,13 +6359,16 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(io_request->index, &request->request_id);
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ request->ml_device_lun_number = lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+ if (ctrl_info->tmf_iu_timeout_supported)
+ put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
- pqi_start_io(ctrl_info,
- &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+ pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -5654,93 +6377,164 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
return rc;
}
-#define PQI_LUN_RESET_RETRIES 3
-#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
-/* Performs a reset at the LUN level. */
+#define PQI_LUN_RESET_RETRIES 3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
+#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
- int rc;
+ int reset_rc;
+ int wait_rc;
unsigned int retries;
- unsigned long timeout_secs;
+ unsigned long timeout_msecs;
for (retries = 0;;) {
- rc = pqi_lun_reset(ctrl_info, device);
- if (rc != -EAGAIN ||
- ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, device, lun);
+ if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
- timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
- rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
+ timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
+ PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- return rc == 0 ? SUCCESS : FAILED;
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
+ if (wait_rc && reset_rc == 0)
+ reset_rc = wait_rc;
+
+ return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{
int rc;
- mutex_lock(&ctrl_info->lun_reset_mutex);
-
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_fail_io_queued_for_device(ctrl_info, device);
+ pqi_fail_io_queued_for_device(ctrl_info, device, lun);
rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_device_reset_start(device);
+ pqi_device_reset_start(device, lun);
pqi_ctrl_unblock_requests(ctrl_info);
-
if (rc)
rc = FAILED;
else
- rc = _pqi_device_reset(ctrl_info, device);
+ rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
+ pqi_device_reset_done(device, lun);
+
+ return rc;
+}
+
+static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
+{
+ unsigned long flags;
+ int rc;
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_find_scsi_dev(ctrl_info, device->bus, device->target, device->lun) == NULL) {
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "skipping reset of scsi %d:%d:%d:%u, device has been removed\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ dev_err(&ctrl_info->pci_dev->dev,
+ "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ rc = FAILED;
+ else
+ rc = pqi_device_reset(ctrl_info, device, lun);
- pqi_device_reset_done(device);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "reset of scsi %d:%d:%d:%u: %s\n",
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
+
return rc;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
- int rc;
struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
+ u8 scsi_opcode;
+
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
+ device = scmd->device->hostdata;
+ scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+
+ return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
+}
+
+static void pqi_tmf_worker(struct work_struct *work)
+{
+ struct pqi_tmf_work *tmf_work;
+ struct scsi_cmnd *scmd;
+
+ tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
+ scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
+
+ pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
+}
+
+static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
+ DECLARE_COMPLETION_ONSTACK(wait);
shost = scmd->device->host;
ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;
dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d\n",
- shost->host_no, device->bus, device->target, device->lun);
+ "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
dev_err(&ctrl_info->pci_dev->dev,
- "controller %u offlined - cannot send device reset\n",
- ctrl_info->ctrl_id);
- rc = FAILED;
+ "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
+ scmd->result = DID_RESET << 16;
goto out;
}
- pqi_wait_until_ofa_finished(ctrl_info);
+ tmf_work = &device->tmf_work[scmd->device->lun];
+
+ if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
+ tmf_work->ctrl_info = ctrl_info;
+ tmf_work->device = device;
+ tmf_work->lun = (u8)scmd->device->lun;
+ tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
+ schedule_work(&tmf_work->work_struct);
+ }
+
+ wait_for_completion(&wait);
- rc = pqi_device_reset(ctrl_info, device);
-out:
dev_err(&ctrl_info->pci_dev->dev,
- "reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, device->lun,
- rc == SUCCESS ? "SUCCESS" : "FAILED");
+ "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
+ shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
- return rc;
+out:
+
+ return SUCCESS;
}
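
Editor's note on the abort path added above: the handler never aborts the individual command at the firmware level; it races the normal completion path for ownership of scmd->host_scribble and, if it wins, hands the work to the TMF worker and waits. A minimal user-space sketch of that handshake, using C11 atomics, is below. The names are stand-ins, and the completion-side behavior (xchg() the field to NULL and signal any waiter found there) is assumed from the sentinel test in the handler rather than shown in this hunk.

/*
 * Editor's sketch, not driver code: the host_scribble handshake between the
 * completion path and the abort handler, modeled with C11 atomics. The real
 * driver uses cmpxchg()/xchg() on scmd->host_scribble with the
 * PQI_NO_COMPLETION sentinel and a struct completion as the waiter.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NO_COMPLETION ((void *)-1)	/* command in flight, nobody waiting */

struct fake_cmd {
	_Atomic(void *) scribble;	/* NO_COMPLETION, NULL (done) or waiter */
};

struct waiter {
	int done;			/* stands in for struct completion */
};

/* Completion side: take ownership; wake a waiter if the abort path left one. */
static void complete_cmd(struct fake_cmd *cmd)
{
	void *prev = atomic_exchange(&cmd->scribble, NULL);

	if (prev != NO_COMPLETION && prev != NULL)
		((struct waiter *)prev)->done = 1;	/* complete(&wait) */
}

/* Abort side: try to install a waiter; bail out if the command already finished. */
static int abort_cmd(struct fake_cmd *cmd, struct waiter *w)
{
	void *expected = NO_COMPLETION;

	if (!atomic_compare_exchange_strong(&cmd->scribble, &expected, w))
		return 0;	/* scribble was already NULL: nothing left to abort */

	/* ...queue the TMF/reset work here, then block until w->done... */
	return 1;
}

int main(void)
{
	struct fake_cmd cmd = { .scribble = NO_COMPLETION };
	struct waiter w = { 0 };

	printf("abort installed waiter: %d\n", abort_cmd(&cmd, &w));
	complete_cmd(&cmd);
	printf("waiter signalled: %d\n", w.done);
	return 0;
}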
-static int pqi_slave_alloc(struct scsi_device *sdev)
+static int pqi_sdev_init(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
unsigned long flags;
@@ -5757,9 +6551,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
rphy = target_to_rphy(starget);
device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
if (device) {
- device->target = sdev_id(sdev);
- device->lun = sdev->lun;
- device->target_lun_valid = true;
+ if (device->target_lun_valid) {
+ device->ignore_device = true;
+ } else {
+ device->target = sdev_id(sdev);
+ device->lun = sdev->lun;
+ device->target_lun_valid = true;
+ }
}
} else {
device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
@@ -5774,10 +6572,13 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev,
device->advertised_queue_depth);
}
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device)) {
pqi_disable_write_same(sdev);
- else
+ } else {
sdev->allow_restart = 1;
+ if (device->device_type == SA_DEVICE_TYPE_NVME)
+ pqi_disable_write_same(sdev);
+ }
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5785,36 +6586,101 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set.map[0],
- ctrl_info->pci_dev, 0);
+ if (!ctrl_info->disable_managed_interrupts)
+ blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ &ctrl_info->pci_dev->dev, 0);
+ else
+ blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+}
+
+static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
+{
+ return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
+}
+
+static int pqi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
+{
+ int rc = 0;
+ struct pqi_scsi_dev *device;
+
+ device = sdev->hostdata;
+ device->devtype = sdev->type;
+
+ if (pqi_is_tape_changer_device(device) && device->ignore_device) {
+ rc = -ENXIO;
+ device->ignore_device = false;
+ }
+
+ return rc;
}
-static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
- void __user *arg)
+static void pqi_sdev_destroy(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ struct pqi_tmf_work *tmf_work;
+ int mutex_acquired;
+ unsigned int lun;
+ unsigned long flags;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+ if (!mutex_acquired)
+ return;
+
+ device = sdev->hostdata;
+ if (!device) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ device->lun_count--;
+ if (device->lun_count > 0) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
+ cancel_work_sync(&tmf_work->work_struct);
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
u32 subsystem_vendor;
u32 subsystem_device;
- cciss_pci_info_struct pciinfo;
+ cciss_pci_info_struct pci_info;
if (!arg)
return -EINVAL;
pci_dev = ctrl_info->pci_dev;
- pciinfo.domain = pci_domain_nr(pci_dev->bus);
- pciinfo.bus = pci_dev->bus->number;
- pciinfo.dev_fn = pci_dev->devfn;
+ pci_info.domain = pci_domain_nr(pci_dev->bus);
+ pci_info.bus = pci_dev->bus->number;
+ pci_info.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
- subsystem_vendor;
+ pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
- if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+ if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
return -EFAULT;
return 0;
@@ -5918,6 +6784,8 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
+ if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
+ return -EBUSY;
if (!arg)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
@@ -5943,17 +6811,15 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
}
if (iocommand.buf_size > 0) {
- kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
- if (!kernel_buffer)
- return -ENOMEM;
if (iocommand.Request.Type.Direction & XFER_WRITE) {
- if (copy_from_user(kernel_buffer, iocommand.buf,
- iocommand.buf_size)) {
- rc = -EFAULT;
- goto out;
- }
+ kernel_buffer = memdup_user(iocommand.buf,
+ iocommand.buf_size);
+ if (IS_ERR(kernel_buffer))
+ return PTR_ERR(kernel_buffer);
} else {
- memset(kernel_buffer, 0, iocommand.buf_size);
+ kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL);
+ if (!kernel_buffer)
+ return -ENOMEM;
}
}
@@ -5998,8 +6864,11 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le16(iu_length, &request.header.iu_length);
+ if (ctrl_info->raid_iu_timeout_supported)
+ put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
+
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+ PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
if (iocommand.buf_size > 0)
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
@@ -6043,16 +6912,14 @@ out:
return rc;
}
-static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
+ void __user *arg)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info))
- return -EBUSY;
-
switch (cmd) {
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
@@ -6076,23 +6943,52 @@ static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
return rc;
}
-static ssize_t pqi_version_show(struct device *dev,
+static ssize_t pqi_firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- ssize_t count = 0;
struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- count += snprintf(buffer + count, PAGE_SIZE - count,
- " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+}
- count += snprintf(buffer + count, PAGE_SIZE - count,
- "firmware: %s\n", ctrl_info->firmware_version);
+static ssize_t pqi_serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
- return count;
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+}
+
+static ssize_t pqi_model_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+}
+
+static ssize_t pqi_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -6113,14 +7009,14 @@ static ssize_t pqi_lockup_action_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
if (pqi_lockup_actions[i].action == pqi_lockup_action)
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"[%s] ", pqi_lockup_actions[i].name);
else
- count += snprintf(buffer + count, PAGE_SIZE - count,
+ count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s ", pqi_lockup_actions[i].name);
}
- count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+ count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
return count;
}
@@ -6132,7 +7028,7 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
char *action_name;
char action_name_buffer[32];
- strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
+ strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
action_name = strstrip(action_name_buffer);
for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
@@ -6145,18 +7041,121 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
-static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
-static DEVICE_ATTR(lockup_action, 0644,
- pqi_lockup_action_show, pqi_lockup_action_store);
+static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n",
+ ctrl_info->enable_stream_detection);
+}
+
+static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_stream_detection = 0;
+
+ if (kstrtou8(buffer, 0, &set_stream_detection))
+ return -EINVAL;
+
+ if (set_stream_detection > 0)
+ set_stream_detection = 1;
+
+ ctrl_info->enable_stream_detection = set_stream_detection;
+
+ return count;
+}
+
+static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
+}
+
+static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_r5_writes = 0;
+
+ if (kstrtou8(buffer, 0, &set_r5_writes))
+ return -EINVAL;
+
+ if (set_r5_writes > 0)
+ set_r5_writes = 1;
+
+ ctrl_info->enable_r5_writes = set_r5_writes;
+
+ return count;
+}
+
+static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+ return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
+}
+
+static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ u8 set_r6_writes = 0;
+
+ if (kstrtou8(buffer, 0, &set_r6_writes))
+ return -EINVAL;
+
+ if (set_r6_writes > 0)
+ set_r6_writes = 1;
+
+ ctrl_info->enable_r6_writes = set_r6_writes;
-static struct device_attribute *pqi_shost_attrs[] = {
- &dev_attr_version,
- &dev_attr_rescan,
- &dev_attr_lockup_action,
+ return count;
+}
+
+static DEVICE_STRING_ATTR_RO(driver_version, 0444,
+ DRIVER_VERSION BUILD_TIMESTAMP);
+static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
+static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
+static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
+static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
+static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
+static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
+ pqi_lockup_action_store);
+static DEVICE_ATTR(enable_stream_detection, 0644,
+ pqi_host_enable_stream_detection_show,
+ pqi_host_enable_stream_detection_store);
+static DEVICE_ATTR(enable_r5_writes, 0644,
+ pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
+static DEVICE_ATTR(enable_r6_writes, 0644,
+ pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
+
+static struct attribute *pqi_shost_attrs[] = {
+ &dev_attr_driver_version.attr.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_model.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_rescan.attr,
+ &dev_attr_lockup_action.attr,
+ &dev_attr_enable_stream_detection.attr,
+ &dev_attr_enable_r5_writes.attr,
+ &dev_attr_enable_r6_writes.attr,
NULL
};
+ATTRIBUTE_GROUPS(pqi_shost);
+
static ssize_t pqi_unique_id_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6164,29 +7163,36 @@ static ssize_t pqi_unique_id_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned char uid[16];
+ u8 unique_id[16];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
- memcpy(uid, device->unique_id, sizeof(uid));
+
+ if (device->is_physical_device)
+ memcpy(unique_id, device->wwid, sizeof(device->wwid));
+ else
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE,
- "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
- uid[0], uid[1], uid[2], uid[3],
- uid[4], uid[5], uid[6], uid[7],
- uid[8], uid[9], uid[10], uid[11],
- uid[12], uid[13], uid[14], uid[15]);
+ return scnprintf(buffer, PAGE_SIZE,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
@@ -6201,22 +7207,26 @@ static ssize_t pqi_lunid_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
+
memcpy(lunid, device->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+ return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
-#define MAX_PATHS 8
+#define MAX_PATHS 8
+
static ssize_t pqi_path_info_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -6228,25 +7238,27 @@ static ssize_t pqi_path_info_show(struct device *dev,
int output_len = 0;
u8 box;
u8 bay;
- u8 path_map_index = 0;
+ u8 path_map_index;
char *active;
- unsigned char phys_connector[2];
+ u8 phys_connector[2];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
bay = device->bay;
for (i = 0; i < MAX_PATHS; i++) {
- path_map_index = 1<<i;
+ path_map_index = 1 << i;
if (i == device->active_path_index)
active = "Active";
else if (device->path_map & path_map_index)
@@ -6297,10 +7309,10 @@ end_buffer:
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
return output_len;
}
-
static ssize_t pqi_sas_address_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6313,19 +7325,22 @@ static ssize_t pqi_sas_address_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (pqi_is_logical_device(device)) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
+
sas_address = device->sas_address;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+ return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
@@ -6339,9 +7354,17 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
buffer[0] = device->raid_bypass_enabled ? '1' : '0';
buffer[1] = '\n';
buffer[2] = '\0';
@@ -6363,39 +7386,209 @@ static ssize_t pqi_raid_level_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
raid_level = pqi_raid_level_to_string(device->raid_level);
else
raid_level = "N/A";
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
- return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+}
+
+static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u64 raid_bypass_cnt;
+ int cpu;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ raid_bypass_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
+}
+
+static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int output_len = 0;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ output_len = snprintf(buf, PAGE_SIZE, "%d\n",
+ device->ncq_prio_enable);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return output_len;
+}
+
+static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u8 ncq_prio_enable = 0;
+
+ if (kstrtou8(buf, 0, &ncq_prio_enable))
+ return -EINVAL;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ if (!device->ncq_prio_support) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -EINVAL;
+ }
+
+ device->ncq_prio_enable = ncq_prio_enable;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return strlen(buf);
+}
+
+static ssize_t pqi_numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct scsi_device *sdev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
+}
+
+static ssize_t pqi_write_stream_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u64 write_stream_cnt;
+ int cpu;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ write_stream_cnt = 0;
+
+ if (device->raid_io_stats) {
+ for_each_online_cpu(cpu) {
+ write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
}
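
Editor's note on the two counter attributes above: the show paths fold one slot per CPU into a single 64-bit total under the device-list lock, while the hot path increments only its own CPU's slot. A small user-space analogue of that pattern, with the per-CPU area modeled as a plain array (alloc_percpu()/per_cpu_ptr() stand behind it in the driver), is sketched below.

/*
 * Editor's sketch, not driver code: the per-CPU statistics pattern behind
 * raid_bypass_cnt and write_stream_cnt, modeled with a fixed array. NR_CPUS
 * here is just an example value.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct raid_io_stats {
	uint64_t raid_bypass_cnt;
	uint64_t write_stream_cnt;
};

static struct raid_io_stats per_cpu_stats[NR_CPUS];

/* Hot path: each CPU touches only its own slot, avoiding shared-cacheline traffic. */
static void note_bypass(unsigned int cpu)
{
	per_cpu_stats[cpu].raid_bypass_cnt++;
}

/* Slow path (sysfs show): sum every slot into one total. */
static uint64_t total_bypass_cnt(void)
{
	uint64_t total = 0;

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		total += per_cpu_stats[cpu].raid_bypass_cnt;

	return total;
}

int main(void)
{
	note_bypass(0);
	note_bypass(2);
	note_bypass(2);
	printf("raid_bypass_cnt = 0x%llx\n",
	       (unsigned long long)total_bypass_cnt());
	return 0;
}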
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
- pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
-
-static struct device_attribute *pqi_sdev_attrs[] = {
- &dev_attr_lunid,
- &dev_attr_unique_id,
- &dev_attr_path_info,
- &dev_attr_sas_address,
- &dev_attr_ssd_smart_path_enabled,
- &dev_attr_raid_level,
+static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
+static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
+ pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
+static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
+static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
+
+static struct attribute *pqi_sdev_attrs[] = {
+ &dev_attr_lunid.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_path_info.attr,
+ &dev_attr_sas_address.attr,
+ &dev_attr_ssd_smart_path_enabled.attr,
+ &dev_attr_raid_level.attr,
+ &dev_attr_raid_bypass_cnt.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_write_stream_cnt.attr,
NULL
};
-static struct scsi_host_template pqi_driver_template = {
+ATTRIBUTE_GROUPS(pqi_sdev);
+
+static const struct scsi_host_template pqi_driver_template = {
.module = THIS_MODULE,
.name = DRIVER_NAME_SHORT,
.proc_name = DRIVER_NAME_SHORT,
@@ -6404,11 +7597,15 @@ static struct scsi_host_template pqi_driver_template = {
.scan_finished = pqi_scan_finished,
.this_id = -1,
.eh_device_reset_handler = pqi_eh_device_reset_handler,
+ .eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
- .slave_alloc = pqi_slave_alloc,
+ .sdev_init = pqi_sdev_init,
+ .sdev_configure = pqi_sdev_configure,
+ .sdev_destroy = pqi_sdev_destroy,
.map_queues = pqi_map_queues,
- .sdev_attrs = pqi_sdev_attrs,
- .shost_attrs = pqi_shost_attrs,
+ .sdev_groups = pqi_sdev_groups,
+ .shost_groups = pqi_shost_groups,
+ .cmd_size = sizeof(struct pqi_cmd_priv),
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
@@ -6418,9 +7615,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
if (!shost) {
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi_host_alloc failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
}
@@ -6429,7 +7624,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->this_id = -1;
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
- shost->max_lun = ~0;
+ shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
shost->can_queue = ctrl_info->scsi_ml_can_queue;
@@ -6439,21 +7634,18 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
shost->unique_id = shost->irq;
shost->nr_hw_queues = ctrl_info->num_queue_groups;
+ shost->host_tagset = 1;
shost->hostdata[0] = (unsigned long)ctrl_info;
rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi_add_host failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
goto free_host;
}
rc = pqi_add_sas_host(shost, ctrl_info);
if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "add SAS host failed for controller %u\n",
- ctrl_info->ctrl_id);
+ dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
goto remove_host;
}
@@ -6500,8 +7692,7 @@ static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
reset_reg.all_bits = readl(&pqi_registers->device_reset);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (!sis_is_firmware_running(ctrl_info)) {
rc = -ENXIO;
break;
}
@@ -6523,8 +7714,7 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
rc = sis_pqi_reset_quiesce(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "PQI reset failed during quiesce with error %d\n",
- rc);
+ "PQI reset failed during quiesce with error %d\n", rc);
return rc;
}
}
@@ -6543,7 +7733,30 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
return rc;
}
-static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct bmic_sense_subsystem_info *sense_info;
+
+ sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
+ if (!sense_info)
+ return -ENOMEM;
+
+ rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
+ if (rc)
+ goto out;
+
+ memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
+ sizeof(sense_info->ctrl_serial_number));
+ ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
+
+out:
+ kfree(sense_info);
+
+ return rc;
+}
+
+static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
{
int rc;
struct bmic_identify_controller *identify;
@@ -6556,13 +7769,35 @@ static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- memcpy(ctrl_info->firmware_version, identify->firmware_version,
- sizeof(identify->firmware_version));
- ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
- snprintf(ctrl_info->firmware_version +
- strlen(ctrl_info->firmware_version),
- sizeof(ctrl_info->firmware_version),
- "-%u", get_unaligned_le16(&identify->firmware_build_number));
+ if (get_unaligned_le32(&identify->extra_controller_flags) &
+ BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
+ memcpy(ctrl_info->firmware_version,
+ identify->firmware_version_long,
+ sizeof(identify->firmware_version_long));
+ } else {
+ memcpy(ctrl_info->firmware_version,
+ identify->firmware_version_short,
+ sizeof(identify->firmware_version_short));
+ ctrl_info->firmware_version
+ [sizeof(identify->firmware_version_short)] = '\0';
+ snprintf(ctrl_info->firmware_version +
+ strlen(ctrl_info->firmware_version),
+ sizeof(ctrl_info->firmware_version) -
+ sizeof(identify->firmware_version_short),
+ "-%u",
+ get_unaligned_le16(&identify->firmware_build_number));
+ }
+
+ memcpy(ctrl_info->model, identify->product_id,
+ sizeof(identify->product_id));
+ ctrl_info->model[sizeof(identify->product_id)] = '\0';
+
+ memcpy(ctrl_info->vendor, identify->vendor_id,
+ sizeof(identify->vendor_id));
+ ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Firmware version: %s\n", ctrl_info->firmware_version);
out:
kfree(identify);
@@ -6641,8 +7876,7 @@ static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
put_unaligned_le16(last_section,
&request.data.config_table_update.last_section);
- return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
@@ -6651,6 +7885,7 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
{
void *features_requested;
void __iomem *features_requested_iomem_addr;
+ void __iomem *host_max_known_feature_iomem_addr;
features_requested = firmware_features->features_supported +
le16_to_cpu(firmware_features->num_elements);
@@ -6661,6 +7896,16 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
memcpy_toio(features_requested_iomem_addr, features_requested,
le16_to_cpu(firmware_features->num_elements));
+ if (pqi_is_firmware_feature_supported(firmware_features,
+ PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
+ host_max_known_feature_iomem_addr =
+ features_requested_iomem_addr +
+ (le16_to_cpu(firmware_features->num_elements) * 2) +
+ sizeof(__le16);
+ writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
+ writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
+ }
+
return pqi_config_table_update(ctrl_info,
PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
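
Editor's note on the negotiation above: the driver copies the feature bits it wants into the features_requested byte region and, when the Maximum Known Feature element is advertised, also writes the highest feature bit number it understands just past the enabled-features bytes. A self-contained sketch of the bit lookup used when such arrays are read back is below; the helper name is hypothetical (the driver's pqi_is_firmware_feature_supported()/..._enabled() are outside this hunk), and the byte/bit indexing is an assumption about the layout, not quoted code.

/*
 * Editor's sketch, not driver code: looking up one feature bit in the byte
 * arrays carried by the firmware-features config table section. Assumes the
 * usual bit = byte_index * 8 + bit_in_byte layout with a bounds check
 * against the element count.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool feature_bit_set(const uint8_t *bytes, unsigned int num_bytes,
			    unsigned int feature_bit)
{
	unsigned int byte_index = feature_bit / 8;

	if (byte_index >= num_bytes)
		return false;	/* firmware predates this feature bit */

	return (bytes[byte_index] >> (feature_bit % 8)) & 1;
}

int main(void)
{
	/* Two element bytes cover feature bits 0-15; bits 1, 9 and 11 set. */
	uint8_t features_enabled[2] = { 0x02, 0x0a };

	printf("feature 11: %d\n",
	       feature_bit_set(features_enabled, sizeof(features_enabled), 11));
	printf("feature 17: %d\n",
	       feature_bit_set(features_enabled, sizeof(features_enabled), 17));
	return 0;
}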
@@ -6694,6 +7939,48 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
firmware_feature->feature_name);
}
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
+ ctrl_info->enable_r1_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
+ ctrl_info->enable_r5_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
+ ctrl_info->enable_r6_writes = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
+ ctrl_info->soft_reset_handshake_supported =
+ firmware_feature->enabled &&
+ pqi_read_soft_reset_status(ctrl_info);
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+ ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+ ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
+ ctrl_info->firmware_triage_supported = firmware_feature->enabled;
+ pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
+ break;
+ case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
+ ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
+ ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
+ ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
+ break;
+ }
+
+ pqi_firmware_feature_status(ctrl_info, firmware_feature);
+}
+
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
@@ -6715,10 +8002,90 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_status = pqi_firmware_feature_status,
},
{
+ .feature_name = "Maximum Known Feature",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 5 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 6 Read Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 0 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "RAID 1 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 5 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID 6 Write Bypass",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "TMF IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
.feature_status = pqi_firmware_feature_status,
},
+ {
+ .feature_name = "Firmware Triage",
+ .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RPL Extended Formats 4 and 5",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "Multi-LUN Target",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "Controller Data Logging",
+ .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -6771,18 +8138,13 @@ static void pqi_process_firmware_features(
return;
}
- ctrl_info->soft_reset_handshake_supported = false;
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
if (pqi_is_firmware_feature_enabled(firmware_features,
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
- pqi_firmware_features[i].enabled = true;
- if (pqi_firmware_features[i].feature_bit ==
- PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
- ctrl_info->soft_reset_handshake_supported =
- true;
+ pqi_firmware_features[i].enabled = true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -6808,14 +8170,37 @@ static void pqi_process_firmware_features_section(
mutex_unlock(&pqi_firmware_features_mutex);
}
+/*
+ * Reset all controller settings that can be initialized during the processing
+ * of the PQI Configuration Table.
+ */
+
+static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->heartbeat_counter = NULL;
+ ctrl_info->soft_reset_status = NULL;
+ ctrl_info->soft_reset_handshake_supported = false;
+ ctrl_info->enable_r1_writes = false;
+ ctrl_info->enable_r5_writes = false;
+ ctrl_info->enable_r6_writes = false;
+ ctrl_info->raid_iu_timeout_supported = false;
+ ctrl_info->tmf_iu_timeout_supported = false;
+ ctrl_info->firmware_triage_supported = false;
+ ctrl_info->rpl_extended_format_4_5_supported = false;
+ ctrl_info->multi_lun_device_supported = false;
+ ctrl_info->ctrl_logging_supported = false;
+}
+
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
u32 table_length;
u32 section_offset;
+ bool firmware_feature_section_present;
void __iomem *table_iomem_addr;
struct pqi_config_table *config_table;
struct pqi_config_table_section_header *section;
struct pqi_config_table_section_info section_info;
+ struct pqi_config_table_section_info feature_section_info = {0};
table_length = ctrl_info->config_table_length;
if (table_length == 0)
@@ -6832,25 +8217,24 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
* Copy the config table contents from I/O memory space into the
* temporary buffer.
*/
- table_iomem_addr = ctrl_info->iomem_base +
- ctrl_info->config_table_offset;
+ table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
memcpy_fromio(config_table, table_iomem_addr, table_length);
+ firmware_feature_section_present = false;
section_info.ctrl_info = ctrl_info;
- section_offset =
- get_unaligned_le32(&config_table->first_section_offset);
+ section_offset = get_unaligned_le32(&config_table->first_section_offset);
while (section_offset) {
section = (void *)config_table + section_offset;
section_info.section = section;
section_info.section_offset = section_offset;
- section_info.section_iomem_addr =
- table_iomem_addr + section_offset;
+ section_info.section_iomem_addr = table_iomem_addr + section_offset;
switch (get_unaligned_le16(&section->section_id)) {
case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
- pqi_process_firmware_features_section(&section_info);
+ firmware_feature_section_present = true;
+ feature_section_info = section_info;
break;
case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
if (pqi_disable_heartbeat)
@@ -6860,8 +8244,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
ctrl_info->heartbeat_counter =
table_iomem_addr +
section_offset +
- offsetof(
- struct pqi_config_table_heartbeat,
+ offsetof(struct pqi_config_table_heartbeat,
heartbeat_counter);
break;
case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
@@ -6869,14 +8252,21 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
table_iomem_addr +
section_offset +
offsetof(struct pqi_config_table_soft_reset,
- soft_reset_status);
+ soft_reset_status);
break;
}
- section_offset =
- get_unaligned_le16(&section->next_section_offset);
+ section_offset = get_unaligned_le16(&section->next_section_offset);
}
+ /*
+ * We process the firmware feature section after all other sections
+ * have been processed so that the feature bit callbacks can take
+ * into account the settings configured by other sections.
+ */
+ if (firmware_feature_section_present)
+ pqi_process_firmware_features_section(&feature_section_info);
+
kfree(config_table);
return 0;
@@ -6924,21 +8314,63 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
+static void pqi_perform_lockup_action(void)
+{
+ switch (pqi_lockup_action) {
+ case PANIC:
+ panic("FATAL: Smart Family Controller lockup detected");
+ break;
+ case REBOOT:
+ emergency_restart();
+ break;
+ case NONE:
+ default:
+ break;
+ }
+}
+
+#define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
+#define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
+ u32 product_id;
- rc = pqi_force_sis_mode(ctrl_info);
- if (rc)
- return rc;
+ if (reset_devices) {
+ if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
+ rc = sis_wait_for_fw_triage_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
+ if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
+ sis_notify_kdump(ctrl_info);
+ rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
+ if (rc)
+ return rc;
+ }
+ sis_soft_reset(ctrl_info);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
+ } else {
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+ }
/*
* Wait until the controller is ready to start accepting SIS
* commands.
*/
rc = sis_wait_for_ctrl_ready(ctrl_info);
- if (rc)
+ if (rc) {
+ if (reset_devices) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "kdump init failed with error %d\n", rc);
+ pqi_lockup_action = REBOOT;
+ pqi_perform_lockup_action();
+ }
return rc;
+ }
/*
* Get the controller properties. This allows us to determine
@@ -6958,15 +8390,19 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- if (reset_devices) {
+ product_id = sis_get_product_id(ctrl_info);
+ ctrl_info->product_id = (u8)product_id;
+ ctrl_info->product_revision = (u8)(product_id >> 8);
+
+ if (is_kdump_kernel()) {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
- ctrl_info->max_outstanding_requests =
+ ctrl_info->max_outstanding_requests =
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
} else {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS)
- ctrl_info->max_outstanding_requests =
+ ctrl_info->max_outstanding_requests =
PQI_MAX_OUTSTANDING_REQUESTS;
}
@@ -7053,11 +8489,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_init_operational_queues(ctrl_info);
- rc = pqi_request_irqs(ctrl_info);
+ rc = pqi_create_queues(ctrl_info);
if (rc)
return rc;
- rc = pqi_create_queues(ctrl_info);
+ rc = pqi_request_irqs(ctrl_info);
if (rc)
return rc;
@@ -7071,6 +8507,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
pqi_start_heartbeat_timer(ctrl_info);
+ if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+ rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+ if (rc) { /* Supported features not returned correctly. */
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining advanced RAID bypass configuration\n");
+ return rc;
+ }
+ ctrl_info->ciss_report_log_flags |=
+ CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+ }
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -7083,10 +8530,22 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- rc = pqi_get_ctrl_firmware_version(ctrl_info);
+ if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
+ pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ }
+
+ rc = pqi_get_ctrl_product_details(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining product details\n");
+ return rc;
+ }
+
+ rc = pqi_get_ctrl_serial_number(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining firmware version\n");
+ "error obtaining ctrl serial number\n");
return rc;
}
@@ -7213,12 +8672,25 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
ctrl_info->controller_online = true;
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_ctrl_reset_config(ctrl_info);
+
rc = pqi_process_config_table(ctrl_info);
if (rc)
return rc;
pqi_start_heartbeat_timer(ctrl_info);
+ if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
+ rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining advanced RAID bypass configuration\n");
+ return rc;
+ }
+ ctrl_info->ciss_report_log_flags |=
+ CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
+ }
+
rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
@@ -7226,10 +8698,10 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- rc = pqi_get_ctrl_firmware_version(ctrl_info);
+ rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining firmware version\n");
+ "error obtaining product details\n");
return rc;
}
@@ -7247,18 +8719,36 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- pqi_schedule_update_time_worker(ctrl_info);
+ if (pqi_ofa_in_progress(ctrl_info)) {
+ pqi_ctrl_unblock_scan(ctrl_info);
+ if (ctrl_info->ctrl_logging_supported) {
+ if (!ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_setup_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory,
+ PQI_CTRL_LOG_TOTAL_SIZE,
+ PQI_CTRL_LOG_MIN_SIZE);
+ pqi_host_memory_update(ctrl_info,
+ &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
+ } else {
+ if (ctrl_info->ctrl_log_memory.host_memory)
+ pqi_host_free_buffer(ctrl_info,
+ &ctrl_info->ctrl_log_memory);
+ }
+ }
pqi_scan_scsi_devices(ctrl_info);
return 0;
}
-static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
- u16 timeout)
+static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{
- return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
+ int rc;
+
+ rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
+
+ return pcibios_err_to_errno(rc);
}
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
@@ -7278,7 +8768,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
else
mask = DMA_BIT_MASK(32);
- rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+ rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
goto disable_device;
@@ -7291,9 +8781,9 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
goto disable_device;
}
- ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+ ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
- sizeof(struct pqi_ctrl_registers));
+ pci_resource_len(ctrl_info->pci_dev, 0));
if (!ctrl_info->iomem_base) {
dev_err(&ctrl_info->pci_dev->dev,
"failed to map memory for controller registers\n");
@@ -7364,19 +8854,27 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
+ INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
+ INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
+
sema_init(&ctrl_info->sync_request_sem,
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
init_waitqueue_head(&ctrl_info->block_requests_wait);
- INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
- spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
- INIT_WORK(&ctrl_info->raid_bypass_retry_work,
- pqi_raid_bypass_retry_worker);
-
ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
ctrl_info->irq_mode = IRQ_MODE_NONE;
ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
+ ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+ ctrl_info->max_transfer_encrypted_sas_sata =
+ PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
+ ctrl_info->max_transfer_encrypted_nvme =
+ PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
+ ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
+ ctrl_info->max_write_raid_1_10_2drive = ~0;
+ ctrl_info->max_write_raid_1_10_3drive = ~0;
+ ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
+
return ctrl_info;
}
@@ -7393,7 +8891,6 @@ static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
- pqi_stop_heartbeat_timer(ctrl_info);
pqi_free_interrupts(ctrl_info);
if (ctrl_info->queue_memory_base)
dma_free_coherent(&ctrl_info->pci_dev->dev,
@@ -7418,9 +8915,16 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
+ ctrl_info->controller_online = false;
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ }
+ pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -7429,228 +8933,203 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
- pqi_wait_until_scan_finished(ctrl_info);
- pqi_ctrl_ofa_start(ctrl_info);
+ pqi_ctrl_block_scan(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
- pqi_fail_io_queued_for_all_devices(ctrl_info);
- pqi_wait_until_inbound_queues_empty(ctrl_info);
pqi_stop_heartbeat_timer(ctrl_info);
- ctrl_info->pqi_mode_enabled = false;
- pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
}
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
- pqi_ofa_free_host_buffer(ctrl_info);
- ctrl_info->pqi_mode_enabled = true;
- pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
- ctrl_info->controller_online = true;
- pqi_ctrl_unblock_requests(ctrl_info);
pqi_start_heartbeat_timer(ctrl_info);
- pqi_schedule_update_time_worker(ctrl_info);
- pqi_clear_soft_reset_status(ctrl_info,
- PQI_SOFT_RESET_ABORT);
- pqi_scan_scsi_devices(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+}
+
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
+{
+ ssleep(delay_secs);
+
+ return pqi_ctrl_init_resume(ctrl_info);
}
-static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
u32 total_size, u32 chunk_size)
{
- u32 sg_count;
- u32 size;
int i;
- struct pqi_sg_descriptor *mem_descriptor = NULL;
+ u32 sg_count;
struct device *dev;
- struct pqi_ofa_memory *ofap;
-
- dev = &ctrl_info->pci_dev->dev;
-
- sg_count = (total_size + chunk_size - 1);
- sg_count /= chunk_size;
-
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+ struct pqi_host_memory *host_memory;
+ struct pqi_sg_descriptor *mem_descriptor;
+ dma_addr_t dma_handle;
- if (sg_count*chunk_size < total_size)
+ sg_count = DIV_ROUND_UP(total_size, chunk_size);
+ if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
goto out;
- ctrl_info->pqi_ofa_chunk_virt_addr =
- kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
- if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+ host_memory_descriptor->host_chunk_virt_address =
+ kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address)
goto out;
- for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
- dma_addr_t dma_handle;
-
- ctrl_info->pqi_ofa_chunk_virt_addr[i] =
- dma_alloc_coherent(dev, chunk_size, &dma_handle,
- GFP_KERNEL);
-
- if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
- break;
+ dev = &ctrl_info->pci_dev->dev;
+ host_memory = host_memory_descriptor->host_memory;
- mem_descriptor = &ofap->sg_descriptor[i];
- put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
- put_unaligned_le32 (chunk_size, &mem_descriptor->length);
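+ /* Allocate each chunk from coherent DMA memory and record its bus address and length in the SG descriptor list. */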
+ for (i = 0; i < sg_count; i++) {
+ host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
+ if (!host_memory_descriptor->host_chunk_virt_address[i])
+ goto out_free_chunks;
+ mem_descriptor = &host_memory->sg_descriptor[i];
+ put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+ put_unaligned_le32(chunk_size, &mem_descriptor->length);
}
- if (!size || size < total_size)
- goto out_free_chunks;
-
put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
- put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
- put_unaligned_le32(size, &ofap->bytes_allocated);
+ put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
+ put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
return 0;
out_free_chunks:
while (--i >= 0) {
- mem_descriptor = &ofap->sg_descriptor[i];
+ mem_descriptor = &host_memory->sg_descriptor[i];
dma_free_coherent(dev, chunk_size,
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
- get_unaligned_le64(&mem_descriptor->address));
+ host_memory_descriptor->host_chunk_virt_address[i],
+ get_unaligned_le64(&mem_descriptor->address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
-
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
- put_unaligned_le32 (0, &ofap->bytes_allocated);
return -ENOMEM;
}
-static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_required_size, u32 min_required_size)
{
- u32 total_size;
+ u32 chunk_size;
u32 min_chunk_size;
- u32 chunk_sz;
- total_size = le32_to_cpu(
- ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
- min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+ if (total_required_size == 0 || min_required_size == 0)
+ return 0;
- for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
- if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
- return 0;
+ total_required_size = PAGE_ALIGN(total_required_size);
+ min_required_size = PAGE_ALIGN(min_required_size);
+ min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
+ min_chunk_size = PAGE_ALIGN(min_chunk_size);
+
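+ /*
+ * Shrink the chunk size first, then halve the total request, until an
+ * allocation succeeds or the minimum required size can no longer be met.
+ */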
+ while (total_required_size >= min_required_size) {
+ for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
+ if (pqi_host_alloc_mem(ctrl_info,
+ host_memory_descriptor, total_required_size,
+ chunk_size) == 0)
+ return 0;
+ chunk_size /= 2;
+ chunk_size = PAGE_ALIGN(chunk_size);
+ }
+ total_required_size /= 2;
+ total_required_size = PAGE_ALIGN(total_required_size);
+ }
return -ENOMEM;
}
-static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
- u32 bytes_requested)
+static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u32 total_size, u32 min_size)
{
- struct pqi_ofa_memory *pqi_ofa_memory;
struct device *dev;
+ struct pqi_host_memory *host_memory;
dev = &ctrl_info->pci_dev->dev;
- pqi_ofa_memory = dma_alloc_coherent(dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
- &ctrl_info->pqi_ofa_mem_dma_handle,
- GFP_KERNEL);
- if (!pqi_ofa_memory)
+ host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
+ &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
+ if (!host_memory)
return;
- put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
- memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
- sizeof(pqi_ofa_memory->signature));
- pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
-
- ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+ host_memory_descriptor->host_memory = host_memory;
- if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
- dev_err(dev, "Failed to allocate host buffer of size = %u",
- bytes_requested);
+ if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
+ total_size, min_size) < 0) {
+ dev_err(dev, "failed to allocate firmware usable host buffer\n");
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
+ return;
}
}
-static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor)
{
- int i;
+ unsigned int i;
+ struct device *dev;
+ struct pqi_host_memory *host_memory;
struct pqi_sg_descriptor *mem_descriptor;
- struct pqi_ofa_memory *ofap;
-
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+ unsigned int num_memory_descriptors;
- if (!ofap)
+ host_memory = host_memory_descriptor->host_memory;
+ if (!host_memory)
return;
- if (!ofap->bytes_allocated)
+ dev = &ctrl_info->pci_dev->dev;
+
+ if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
goto out;
- mem_descriptor = ofap->sg_descriptor;
+ mem_descriptor = host_memory->sg_descriptor;
+ num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
- for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
- i++) {
- dma_free_coherent(&ctrl_info->pci_dev->dev,
+ for (i = 0; i < num_memory_descriptors; i++) {
+ dma_free_coherent(dev,
get_unaligned_le32(&mem_descriptor[i].length),
- ctrl_info->pqi_ofa_chunk_virt_addr[i],
+ host_memory_descriptor->host_chunk_virt_address[i],
get_unaligned_le64(&mem_descriptor[i].address));
}
- kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+ kfree(host_memory_descriptor->host_chunk_virt_address);
out:
- dma_free_coherent(&ctrl_info->pci_dev->dev,
- PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
- ctrl_info->pqi_ofa_mem_dma_handle);
- ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+ dma_free_coherent(dev, sizeof(*host_memory), host_memory,
+ host_memory_descriptor->host_memory_dma_handle);
+ host_memory_descriptor->host_memory = NULL;
}
-static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_host_memory_descriptor *host_memory_descriptor,
+ u16 function_code)
{
+ u32 buffer_length;
struct pqi_vendor_general_request request;
- size_t size;
- struct pqi_ofa_memory *ofap;
+ struct pqi_host_memory *host_memory;
memset(&request, 0, sizeof(request));
- ofap = ctrl_info->pqi_ofa_mem_virt_addr;
-
request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
- put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
- &request.header.iu_length);
- put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
- &request.function_code);
-
- if (ofap) {
- size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
- get_unaligned_le16(&ofap->num_memory_descriptors) *
- sizeof(struct pqi_sg_descriptor);
-
- put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
- &request.data.ofa_memory_allocation.buffer_address);
- put_unaligned_le32(size,
- &request.data.ofa_memory_allocation.buffer_length);
-
+ put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+ put_unaligned_le16(function_code, &request.function_code);
+
+ host_memory = host_memory_descriptor->host_memory;
+
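+ /* If a host buffer exists, describe it to the firmware and stamp the version/signature matching the requested function (OFA vs. controller log). */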
+ if (host_memory) {
+ buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
+ put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
+ put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
+
+ if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
+ } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
+ put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
+ memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
+ }
}
- return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, NULL, NO_TIMEOUT);
-}
-
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
-static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
-{
- msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
- return pqi_ctrl_init_resume(ctrl_info);
-}
-
-static void pqi_perform_lockup_action(void)
-{
- switch (pqi_lockup_action) {
- case PANIC:
- panic("FATAL: Smart Family Controller lockup detected");
- break;
- case REBOOT:
- emergency_restart();
- break;
- case NONE:
- default:
- break;
- }
+ return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
@@ -7663,6 +9142,7 @@ static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
unsigned int i;
struct pqi_io_request *io_request;
struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
for (i = 0; i < ctrl_info->max_io_slots; i++) {
io_request = &ctrl_info->io_request_pool[i];
@@ -7671,7 +9151,13 @@ static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
scmd = io_request->scmd;
if (scmd) {
- set_host_byte(scmd, DID_NO_CONNECT);
+ sdev = scmd->device;
+ if (!sdev || !scsi_device_online(sdev)) {
+ pqi_free_io_request(io_request);
+ continue;
+ } else {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ }
} else {
io_request->status = -ENXIO;
io_request->error_info =
@@ -7692,8 +9178,8 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
pqi_cancel_update_time_worker(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_all_outstanding_requests(ctrl_info);
- pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_take_ctrl_devices_offline(ctrl_info);
}
static void pqi_ctrl_offline_worker(struct work_struct *work)
@@ -7704,7 +9190,54 @@ static void pqi_ctrl_offline_worker(struct work_struct *work)
pqi_take_ctrl_offline_deferred(ctrl_info);
}
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
+{
+ char *string;
+
+ switch (ctrl_shutdown_reason) {
+ case PQI_IQ_NOT_DRAINED_TIMEOUT:
+ string = "inbound queue not drained timeout";
+ break;
+ case PQI_LUN_RESET_TIMEOUT:
+ string = "LUN reset timeout";
+ break;
+ case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
+ string = "I/O pending timeout after LUN reset";
+ break;
+ case PQI_NO_HEARTBEAT:
+ string = "no controller heartbeat detected";
+ break;
+ case PQI_FIRMWARE_KERNEL_NOT_UP:
+ string = "firmware kernel not ready";
+ break;
+ case PQI_OFA_RESPONSE_TIMEOUT:
+ string = "OFA response timeout";
+ break;
+ case PQI_INVALID_REQ_ID:
+ string = "invalid request ID";
+ break;
+ case PQI_UNMATCHED_REQ_ID:
+ string = "unmatched request ID";
+ break;
+ case PQI_IO_PI_OUT_OF_RANGE:
+ string = "I/O queue producer index out of range";
+ break;
+ case PQI_EVENT_PI_OUT_OF_RANGE:
+ string = "event queue producer index out of range";
+ break;
+ case PQI_UNEXPECTED_IU_TYPE:
+ string = "unexpected IU type";
+ break;
+ default:
+ string = "unknown reason";
+ break;
+ }
+
+ return string;
+}
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
if (!ctrl_info->controller_online)
return;
@@ -7713,12 +9246,35 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = false;
pqi_ctrl_block_requests(ctrl_info);
if (!pqi_disable_ctrl_shutdown)
- sis_shutdown_ctrl(ctrl_info);
+ sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
pci_disable_device(ctrl_info->pci_dev);
- dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller offline: reason code 0x%x (%s)\n",
+ ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
schedule_work(&ctrl_info->ctrl_offline_work);
}
+static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list);
+ if (rc)
+ continue;
+
+ /*
+ * Offline only devices that have been exposed to the SCSI midlayer
+ * (i.e. the sdev pointer has been populated).
+ */
+ if (device->sdev)
+ scsi_device_set_state(device->sdev, SDEV_OFFLINE);
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -7727,7 +9283,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
if (id->driver_data)
ctrl_description = (char *)id->driver_data;
else
- ctrl_description = "Microsemi Smart Family Controller";
+ ctrl_description = "Microchip Smart Family Controller";
dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
@@ -7736,7 +9292,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
int rc;
- int node, cp_node;
+ int node;
struct pqi_ctrl_info *ctrl_info;
pqi_print_ctrl_info(pci_dev, id);
@@ -7755,10 +9311,10 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
node = dev_to_node(&pci_dev->dev);
if (node == NUMA_NO_NODE) {
- cp_node = cpu_to_node(0);
- if (cp_node == NUMA_NO_NODE)
- cp_node = 0;
- set_dev_node(&pci_dev->dev, cp_node);
+ node = cpu_to_node(0);
+ if (node == NUMA_NO_NODE)
+ node = 0;
+ set_dev_node(&pci_dev->dev, node);
}
ctrl_info = pqi_alloc_ctrl_info(node);
@@ -7767,6 +9323,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
"failed to allocate controller info block\n");
return -ENOMEM;
}
+ ctrl_info->numa_node = node;
ctrl_info->pci_dev = pci_dev;
@@ -7789,38 +9346,81 @@ error:
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
+ u16 vendor_id;
+ int rc;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
- ctrl_info->in_shutdown = true;
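+ /* A subsystem vendor ID of all ones means config space is no longer readable, i.e. the controller was surprise-removed. */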
+ pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
+ if (vendor_id == 0xffff)
+ ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
+ else
+ ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
+
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
+ rc = pqi_flush_cache(ctrl_info, RESTART);
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache during remove\n");
+ }
pqi_remove_ctrl(ctrl_info);
}
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
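+ /* Any I/O slot still holding a reference here is a command that never completed; warn to make the condition visible. */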
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+ scmd = io_request->scmd;
+ WARN_ON(scmd != NULL); /* pending I/O command from the SCSI midlayer */
+ WARN_ON(scmd == NULL); /* pending non-I/O or driver-initiated command */
+ }
+}
+
static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
ctrl_info = pci_get_drvdata(pci_dev);
- if (!ctrl_info)
- goto error;
+ if (!ctrl_info) {
+ dev_err(&pci_dev->dev,
+ "cache could not be flushed\n");
+ return;
+ }
+
+ pqi_wait_until_ofa_finished(ctrl_info);
+
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+
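+ /* Pick the flush-cache shutdown event that matches how the system is going down. */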
+ if (system_state == SYSTEM_RESTART)
+ shutdown_event = RESTART;
+ else
+ shutdown_event = SHUTDOWN;
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
- rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
- pqi_free_interrupts(ctrl_info);
- pqi_reset(ctrl_info);
- if (rc == 0)
- return;
+ rc = pqi_flush_cache(ctrl_info, shutdown_event);
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache during shutdown\n");
-error:
- dev_warn(&pci_dev->dev,
- "unable to flush controller cache\n");
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
@@ -7842,35 +9442,69 @@ static void pqi_process_lockup_action_param(void)
DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
+
+static void pqi_process_ctrl_ready_timeout_param(void)
+{
+ if (pqi_ctrl_ready_timeout_secs == 0)
+ return;
+
+ if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
+ } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
+ }
+
+ sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
+}
+
static void pqi_process_module_params(void)
{
pqi_process_lockup_action_param();
+ pqi_process_ctrl_ready_timeout_param();
+}
+
+#if defined(CONFIG_PM)
+
+static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
+{
+ if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
+ return RESTART;
+
+ return SUSPEND;
}
-static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
- pqi_disable_events(ctrl_info);
- pqi_cancel_update_time_worker(ctrl_info);
- pqi_cancel_rescan_worker(ctrl_info);
- pqi_wait_until_scan_finished(ctrl_info);
- pqi_wait_until_lun_reset_finished(ctrl_info);
pqi_wait_until_ofa_finished(ctrl_info);
- pqi_flush_cache(ctrl_info, SUSPEND);
+
+ pqi_ctrl_block_scan(ctrl_info);
+ pqi_scsi_block_requests(ctrl_info);
+ pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_wait_until_inbound_queues_empty(ctrl_info);
- pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
- pqi_stop_heartbeat_timer(ctrl_info);
- if (state.event == PM_EVENT_FREEZE)
- return 0;
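+ /* Flush the controller cache only on a true suspend; the freeze path (suspend == false) skips it. */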
+ if (suspend) {
+ enum bmic_flush_cache_shutdown_event shutdown_event;
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+ }
+
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_free_irqs(ctrl_info);
ctrl_info->controller_online = false;
ctrl_info->pqi_mode_enabled = false;
@@ -7878,37 +9512,89 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
return 0;
}
-static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
+static __maybe_unused int pqi_suspend(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, true);
+}
+
+static int pqi_resume_or_restore(struct device *dev)
{
int rc;
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
- if (pci_dev->current_state != PCI_D0) {
- ctrl_info->max_hw_queue_index = 0;
- pqi_free_interrupts(ctrl_info);
- pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
- rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
- IRQF_SHARED, DRIVER_NAME_SHORT,
- &ctrl_info->queue_groups[0]);
- if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "irq %u init failed with error %d\n",
- pci_dev->irq, rc);
- return rc;
- }
- pqi_start_heartbeat_timer(ctrl_info);
- pqi_ctrl_unblock_requests(ctrl_info);
- return 0;
- }
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+
+ ssleep(PQI_POST_RESET_DELAY_SECS);
return pqi_ctrl_init_resume(ctrl_info);
}
+static int pqi_freeze(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, false);
+}
+
+static int pqi_thaw(struct device *dev)
+{
+ int rc;
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
+
+ ctrl_info->controller_online = true;
+ ctrl_info->pqi_mode_enabled = true;
+
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+
+ return 0;
+}
+
+static int pqi_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pqi_pm_ops = {
+ .suspend = pqi_suspend,
+ .resume = pqi_resume_or_restore,
+ .freeze = pqi_freeze,
+ .thaw = pqi_thaw,
+ .poweroff = pqi_poweroff,
+ .restore = pqi_resume_or_restore,
+};
+
+#endif /* CONFIG_PM */
+
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
{
@@ -7941,6 +9627,42 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x0462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1104)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1105)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1106)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1107)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1108)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1109)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x110b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x1110)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -7949,6 +9671,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x8462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0xc460)
},
{
@@ -7993,6 +9719,82 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0086)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0087)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0088)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0089)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3a, 0x0104)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -8025,6 +9827,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0659)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
@@ -8057,6 +9863,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0809)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x080a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
@@ -8137,10 +9955,202 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1304)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1400)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1402)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1410)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1411)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1412)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1420)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1430)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1440)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1441)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1450)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1452)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1460)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1461)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1462)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1463)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1470)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1471)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1472)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1473)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1474)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1475)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1490)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1491)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4084)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4094)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4140)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4240)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4840)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
@@ -8205,6 +10215,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_HP, 0x1100)
},
{
@@ -8213,6 +10227,442 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0294)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x02db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x02dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x032e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x036f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0381)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0382)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0383)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0800)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0908)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0806)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0916)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_GIGABYTE, 0x1000)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1dfc, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f0c, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0804)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0805)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0806)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5445)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5446)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5447)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5449)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54da)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b27)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b29)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0b45)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0201)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1018, 0x8238)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3f, 0x0610)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0220)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0221)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0222)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0223)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0224)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0225)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0520)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0521)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0522)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0620)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0621)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0622)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0624)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0625)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0626)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0627)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0628)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1014, 0x0718)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02f8)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02f9)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fa)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fe)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02ff)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x0300)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ded, 0x3301)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0046)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0047)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0048)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x004f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0051)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0052)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0053)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0086)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0087)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0088)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x0089)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1000)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1003)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1004)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1006)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1007)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1008)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1009)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1010)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1011)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1043)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ff9, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
@@ -8227,8 +10677,9 @@ static struct pci_driver pqi_pci_driver = {
.remove = pqi_pci_remove,
.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
- .suspend = pqi_suspend,
- .resume = pqi_resume,
+ .driver = {
+ .pm = &pqi_pm_ops
+ },
#endif
};
@@ -8237,9 +10688,10 @@ static int __init pqi_init(void)
int rc;
pr_info(DRIVER_NAME "\n");
+ pqi_verify_structures();
+ sis_verify_structures();
- pqi_sas_transport_template =
- sas_attach_transport(&pqi_sas_transport_functions);
+ pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
if (!pqi_sas_transport_template)
return -ENODEV;
@@ -8261,7 +10713,7 @@ static void __exit pqi_cleanup(void)
module_init(pqi_init);
module_exit(pqi_cleanup);
-static void __attribute__((unused)) verify_structures(void)
+static void pqi_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_host_to_ctrl_doorbell) != 0x20);
@@ -8274,8 +10726,12 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_driver_scratch) != 0xb0);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_product_identifier) != 0xb4);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_firmware_status) != 0xbc);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+ sis_ctrl_shutdown_reason_code) != 0xcc);
+ BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_mailbox) != 0x1000);
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
pqi_registers) != 0x4000);
@@ -8287,7 +10743,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_iu_header,
response_queue_id) != 0x4);
BUILD_BUG_ON(offsetof(struct pqi_iu_header,
- work_area) != 0x6);
+ driver_flags) != 0x6);
BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
@@ -8385,7 +10841,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
header.iu_length) != 2);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
@@ -8429,11 +10885,11 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
data.delete_operational_queue.queue_id) != 12);
BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
data.create_operational_iq) != 64 - 11);
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
data.create_operational_oq) != 64 - 11);
- BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+ BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
data.delete_operational_queue) != 64 - 11);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
@@ -8441,7 +10897,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
header.iu_length) != 2);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
@@ -8465,7 +10921,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
header.response_queue_id) != 4);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
@@ -8481,6 +10937,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
cdb) != 32);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ timeout) != 60);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
sg_descriptors) != 64);
BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
@@ -8492,7 +10950,7 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
header.response_queue_id) != 4);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
- header.work_area) != 6);
+ header.driver_flags) != 6);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
request_id) != 8);
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
@@ -8635,6 +11093,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
nexus_id) != 10);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ timeout) != 14);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
lun_number) != 16);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
protocol_specific) != 24);
@@ -8665,13 +11125,23 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
configuration_signature) != 1);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
- firmware_version) != 5);
+ firmware_version_short) != 5);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
extended_logical_unit_count) != 154);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
firmware_build_number) != 190);
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ vendor_id) != 200);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ product_id) != 208);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ extra_controller_flags) != 286);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
controller_mode) != 292);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ spare_part_number) != 293);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+ firmware_version_long) != 325);
BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
phys_bay_in_box) != 115);
@@ -8689,6 +11159,45 @@ static void __attribute__((unused)) verify_structures(void)
current_queue_depth_limit) != 1796);
BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ page_code) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ subpage_code) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
+ buffer_length) != 2);
+
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ page_code) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ subpage_code) != 1);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
+ page_length) != 2);
+
+ BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
+ != 18);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ header) != 0);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ firmware_read_support) != 4);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ driver_read_support) != 5);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ firmware_write_support) != 6);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ driver_write_support) != 7);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_transfer_encrypted_sas_sata) != 8);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_transfer_encrypted_nvme) != 10);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_5_6) != 12);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_2drive) != 14);
+ BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
+ max_write_raid_1_10_3drive) != 16);
+
BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %