path: root/drivers/scsi/mpt3sas/mpt3sas_scsih.c
Diffstat (limited to 'drivers/scsi/mpt3sas/mpt3sas_scsih.c')
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c  | 8492
1 file changed, 6614 insertions, 1878 deletions
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8cbe8fd21fc4..7092d0debef3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2,8 +2,9 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
- * Copyright (C) 2012-2013 LSI Corporation
- * (mailto:DL-MPTFusionLinux@lsi.com)
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -51,17 +52,15 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/aer.h>
#include <linux/raid_class.h>
+#include <linux/unaligned.h>
#include "mpt3sas_base.h"
-MODULE_AUTHOR(MPT3SAS_AUTHOR);
-MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
-
#define RAID_CHANNEL 1
+
+#define PCIE_CHANNEL 2
+
/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_expander);
@@ -71,14 +70,24 @@ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u8 retry_count, u8 is_pd);
-
+static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device);
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-
-static void _scsih_scan_start(struct Scsi_Host *shost);
-static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
+/* global ioc lock for list operations */
+DEFINE_SPINLOCK(gioc_lock);
+
+MODULE_AUTHOR(MPT3SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
+MODULE_ALIAS("mpt2sas");
/* local parameters */
static u8 scsi_io_cb_idx = -1;
@@ -89,7 +98,8 @@ static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
-static int mpt_ids;
+static int mpt2_ids;
+static int mpt3_ids;
static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
@@ -102,22 +112,26 @@ MODULE_PARM_DESC(logging_level,
static ushort max_sectors = 0xFFFF;
-module_param(max_sectors, ushort, 0);
+module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
static int missing_delay[2] = {-1, -1};
-module_param_array(missing_delay, int, NULL, 0);
+module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
-static int max_lun = MPT3SAS_MAX_LUN;
-module_param(max_lun, int, 0);
+static u64 max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
-
-
+static ushort hbas_to_enumerate;
+module_param(hbas_to_enumerate, ushort, 0444);
+MODULE_PARM_DESC(hbas_to_enumerate,
+ " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
+ 1 - enumerates only SAS 2.0 generation HBAs\n \
+ 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
/* diag_buffer_enable is bitwise
* bit 0 set = TRACE
@@ -127,23 +141,41 @@ MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
* Either bit can be set, or both
*/
static int diag_buffer_enable = -1;
-module_param(diag_buffer_enable, int, 0);
+module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
-module_param(disable_discovery, int, 0);
+module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+static bool enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, bool, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd,
+ "Enable sdev max qd as can_queue, def=disabled(0)");
+
+static int multipath_on_hba = -1;
+module_param(multipath_on_hba, int, 0);
+MODULE_PARM_DESC(multipath_on_hba,
+ "Multipath support to add same target device\n\t\t"
+ "as many times as it is visible to HBA from various paths\n\t\t"
+ "(by default:\n\t\t"
+ "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
+ "\t SAS 3.5 HBA - This will be enabled)");
+
+static int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable,
+ "Shared host tagset enable/disable Default: enable(1)");
/* raid transport support */
-
static struct raid_template *mpt3sas_raid_template;
+static struct raid_template *mpt2sas_raid_template;
/**
@@ -159,21 +191,29 @@ struct sense_info {
};
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
-#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+
+/*
+ * SAS Log info code for a NCQ collateral abort after an NCQ error:
+ * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
+ * See: drivers/message/fusion/lsi/mpi_log_sas.h
+ */
+#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000
+
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
* @work: work object (ioc->fault_reset_work_q)
- * @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
* @device_handle: device handle
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @ignore: flag meaning this event has been marked to ignore
- * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @refcount: kref for this event
* @event_data: reply event data payload follows
*
* This object stored on ioc->fw_event_list.
@@ -181,8 +221,6 @@ struct sense_info {
struct fw_event_work {
struct list_head list;
struct work_struct work;
- u8 cancel_pending_work;
- struct delayed_work delayed_work;
struct MPT3SAS_ADAPTER *ioc;
u16 device_handle;
@@ -190,11 +228,36 @@ struct fw_event_work {
u8 VP_ID;
u8 ignore;
u16 event;
- void *event_data;
+ struct kref refcount;
+ char event_data[] __aligned(4);
};
-/* raid transport support */
-static struct raid_template *mpt3sas_raid_template;
+static void fw_event_work_free(struct kref *r)
+{
+ kfree(container_of(r, struct fw_event_work, refcount));
+}
+
+static void fw_event_work_get(struct fw_event_work *fw_work)
+{
+ kref_get(&fw_work->refcount);
+}
+
+static void fw_event_work_put(struct fw_event_work *fw_work)
+{
+ kref_put(&fw_work->refcount, fw_event_work_free);
+}
+
+static struct fw_event_work *alloc_fw_event_work(int len)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
+ if (!fw_event)
+ return NULL;
+
+ kref_init(&fw_event->refcount);
+ return fw_event;
+}
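
The firmware-event object is now reference counted through a kref rather than the old cancel_pending_work flag. As a minimal, hypothetical sketch (not part of this patch; the handler example_fw_event_handler is illustrative and the fw_event_list bookkeeping the driver also performs is omitted), a producer would allocate, take a reference for the queued work, and drop its own reference:

	/* Hypothetical sketch of the fw_event_work kref lifecycle. */
	static void example_queue_fw_event(struct MPT3SAS_ADAPTER *ioc,
		Mpi2EventNotificationReply_t *mpi_reply, u16 event, u16 sz)
	{
		struct fw_event_work *fw_event;

		fw_event = alloc_fw_event_work(sz);	/* refcount starts at 1 */
		if (!fw_event)
			return;
		fw_event->ioc = ioc;
		fw_event->event = event;
		memcpy(fw_event->event_data, mpi_reply->EventData, sz);

		fw_event_work_get(fw_event);		/* reference held by the queued work */
		INIT_WORK(&fw_event->work, example_fw_event_handler);
		queue_work(ioc->firmware_event_thread, &fw_event->work);

		fw_event_work_put(fw_event);		/* drop the local reference */
	}

The work handler would end with a matching fw_event_work_put() once the event has been processed.
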
/**
* struct _scsi_io_transfer - scsi io transfer
@@ -244,35 +307,15 @@ struct _scsi_io_transfer {
u32 transfer_length;
};
-/*
- * The pci device ids are defined in mpi/mpi2_cnfg.h.
- */
-static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = {
- /* Fury ~ 3004 and 3008 */
- { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
- PCI_ANY_ID, PCI_ANY_ID },
- /* Invader ~ 3108 */
- { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
- PCI_ANY_ID, PCI_ANY_ID },
- { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
- PCI_ANY_ID, PCI_ANY_ID },
- {0} /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, scsih_pci_table);
-
/**
* _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: value of the parameter to be set
+ * @kp: pointer to kernel_param structure
*
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
static int
-_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
@@ -281,8 +324,10 @@ _scsih_set_debug_level(const char *val, struct kernel_param *kp)
return ret;
pr_info("setting logging_level(0x%08x)\n", logging_level);
+ spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
ioc->logging_level = logging_level;
+ spin_unlock(&gioc_lock);
return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
@@ -293,7 +338,7 @@ module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
* @sas_address: sas address
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
@@ -307,7 +352,7 @@ _scsih_srch_boot_sas_address(u64 sas_address,
* @device_name: device name specified in IDENTIFY frame
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_device_name(u64 device_name,
@@ -322,7 +367,7 @@ _scsih_srch_boot_device_name(u64 device_name,
* @slot_number: slot number
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
@@ -334,15 +379,96 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
}
/**
+ * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
+ * port number from port list
+ * @ioc: per adapter object
+ * @port_id: port number
+ * @bypass_dirty_port_flag: when set, look for the matching hba port entry
+ * even if it is marked as dirty.
+ *
+ * Search for hba port entry corresponding to provided port number,
+ * if available return port object otherwise return NULL.
+ */
+struct hba_port *
+mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
+ u8 port_id, u8 bypass_dirty_port_flag)
+{
+ struct hba_port *port, *port_next;
+
+ /*
+ * When multipath_on_hba is disabled then
+ * search the hba_port entry using default
+ * port id i.e. 255
+ */
+ if (!ioc->multipath_on_hba)
+ port_id = MULTIPATH_DISABLED_PORT_ID;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
+ if (bypass_dirty_port_flag)
+ return port;
+ if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
+ continue;
+ return port;
+ }
+
+ /*
+ * Allocate hba_port object for default port id (i.e. 255)
+ * when multipath_on_hba is disabled for the HBA.
+ * And add this object to port_table_list.
+ */
+ if (!ioc->multipath_on_hba) {
+ port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
+ if (!port)
+ return NULL;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ list_add_tail(&port->list,
+ &ioc->port_table_list);
+ return port;
+ }
+ return NULL;
+}
+
+/**
+ * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
+ * @ioc: per adapter object
+ * @port: hba_port object
+ * @phy: phy number
+ *
+ * Return virtual_phy object corresponding to phy number.
+ */
+struct virtual_phy *
+mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port, u32 phy)
+{
+ struct virtual_phy *vphy, *vphy_next;
+
+ if (!port->vphys_mask)
+ return NULL;
+
+ list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
+ if (vphy->phy_mask & (1 << phy))
+ return vphy;
+ }
+ return NULL;
+}
+
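
Taken together, the two lookups above resolve a firmware port number to its hba_port and then to the virtual_phy owning a particular phy. A hypothetical caller sketch (the function name is illustrative only):

	/* Hypothetical sketch: map a port number and phy to its virtual_phy. */
	static struct virtual_phy *
	example_lookup_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u32 phy)
	{
		struct hba_port *port;

		/* bypass_dirty_port_flag = 0: skip entries marked dirty */
		port = mpt3sas_get_port_by_id(ioc, port_id, 0);
		if (!port)
			return NULL;

		/* returns the vphy whose phy_mask has bit 'phy' set, if any */
		return mpt3sas_get_vphy_by_phy(ioc, port, phy);
	}
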
+/**
* _scsih_is_boot_device - search for matching boot device.
* @sas_address: sas address
* @device_name: device name specified in IDENTIFY frame
* @enclosure_logical_id: enclosure logical id
- * @slot_number: slot number
+ * @slot: slot number
* @form: specifies boot device form
* @boot_device: boot device object from bios page 2
*
- * Returns 1 when there's a match, 0 means no match.
+ * Return: 1 when there's a match, 0 means no match.
*/
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
@@ -380,10 +506,11 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name,
/**
* _scsih_get_sas_address - set the sas_address for given device handle
+ * @ioc: ?
* @handle: device handle
* @sas_address: sas address
*
- * Returns 0 success, non-zero when failure
+ * Return: 0 success, non-zero when failure
*/
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
@@ -395,54 +522,56 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*sas_address = 0;
- if (handle <= ioc->sas_hba.num_phys) {
- *sas_address = ioc->sas_hba.sas_address;
- return 0;
- }
-
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ /* For HBA, vSES doesn't return HBA SAS address. Instead return
+ * vSES's sas address.
+ */
+ if ((handle <= ioc->sas_hba.num_phys) &&
+ (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP)))
+ *sas_address = ioc->sas_hba.sas_address;
+ else
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
return 0;
}
- /* we hit this becuase the given parent handle doesn't exist */
+ /* we hit this because the given parent handle doesn't exist */
if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
return -ENXIO;
/* else error case */
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
/**
* _scsih_determine_boot_device - determine boot device.
* @ioc: per adapter object
- * @device: either sas_device or raid_device object
- * @is_raid: [flag] 1 = raid object, 0 = sas object
+ * @device: sas_device or pcie_device object
+ * @channel: SAS or PCIe channel
*
* Determines whether this device should be first reported device to
* to scsi-ml or sas transport, this purpose is for persistent boot device.
* There are primary, alternate, and current entries in bios page 2. The order
* priority is primary, alternate, then current. This routine saves
- * the corresponding device object and is_raid flag in the ioc object.
+ * the corresponding device object.
* The saved data to be used later in _scsih_probe_boot_devices().
*/
static void
-_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
- void *device, u8 is_raid)
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
+ u32 channel)
{
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _raid_device *raid_device;
u64 sas_address;
u64 device_name;
@@ -457,18 +586,24 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->bios_pg3.BiosVersion)
return;
- if (!is_raid) {
- sas_device = device;
- sas_address = sas_device->sas_address;
- device_name = sas_device->device_name;
- enclosure_logical_id = sas_device->enclosure_logical_id;
- slot = sas_device->slot;
- } else {
+ if (channel == RAID_CHANNEL) {
raid_device = device;
sas_address = raid_device->wwid;
device_name = 0;
enclosure_logical_id = 0;
slot = 0;
+ } else if (channel == PCIE_CHANNEL) {
+ pcie_device = device;
+ sas_address = pcie_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ } else {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
}
if (!ioc->req_boot_device.device) {
@@ -477,12 +612,11 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
(ioc->bios_pg2.ReqBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_boot_device.device = device;
- ioc->req_boot_device.is_raid = is_raid;
+ ioc->req_boot_device.channel = channel;
}
}
@@ -492,12 +626,11 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
(ioc->bios_pg2.ReqAltBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedAltBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_alt_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_alt_boot_device.device = device;
- ioc->req_alt_boot_device.is_raid = is_raid;
+ ioc->req_alt_boot_device.channel = channel;
}
}
@@ -507,44 +640,212 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
(ioc->bios_pg2.CurrentBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.CurrentBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: current_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->current_boot_device.device = device;
- ioc->current_boot_device.is_raid = is_raid;
+ ioc->current_boot_device.channel = channel;
}
}
}
+static struct _sas_device *
+__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _sas_device *ret;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ ret = tgt_priv->sas_dev;
+ if (ret)
+ sas_device_get(ret);
+
+ return ret;
+}
+
+static struct _sas_device *
+mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _sas_device *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return ret;
+}
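
Both helpers return the sas_device with an extra reference taken under ioc->sas_device_lock, so a caller is expected to balance it with sas_device_put() once done. A hypothetical caller sketch:

	/* Hypothetical sketch: fetch the handle behind a target, then drop the ref. */
	static u16 example_target_handle(struct MPT3SAS_ADAPTER *ioc,
		struct MPT3SAS_TARGET *tgt_priv)
	{
		struct _sas_device *sas_device;
		u16 handle = 0;

		sas_device = mpt3sas_get_sdev_from_target(ioc, tgt_priv);
		if (sas_device) {
			handle = sas_device->handle;
			sas_device_put(sas_device);	/* balances the lookup's get() */
		}
		return handle;
	}
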
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _pcie_device *ret;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ ret = tgt_priv->pcie_dev;
+ if (ret)
+ pcie_device_get(ret);
+
+ return ret;
+}
+
+/**
+ * mpt3sas_get_pdev_from_target - pcie device search
+ * @ioc: per adapter object
+ * @tgt_priv: starget private object
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * before returning the pcie_device object.
+ *
+ * This searches for pcie_device from target, then return pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _pcie_device *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return ret;
+}
+
+
/**
- * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search
+ * __mpt3sas_get_sdev_by_rphy - sas device search
+ * @ioc: per adapter object
+ * @rphy: sas_rphy pointer
+ *
+ * Context: This function will acquire ioc->sas_device_lock and will release
+ * before returning the sas_device object.
+ *
+ * This searches for sas_device from rphy object
+ * then return sas_device object.
+ */
+struct _sas_device *
+__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_rphy *rphy)
+{
+ struct _sas_device *sas_device;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->rphy != rphy)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ sas_device = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+ if (sas_device->rphy != rphy)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ return NULL;
+}
+
+/**
+ * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
+ * sas address from sas_device_list list
+ * @ioc: per adapter object
+ * @sas_address: device sas address
+ * @port: port number
+ *
+ * Search for _sas_device object corresponding to provided sas address,
+ * if available return _sas_device object address otherwise return NULL.
+ */
+struct _sas_device *
+__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
+{
+ struct _sas_device *sas_device;
+
+ if (!port)
+ return NULL;
+
+ assert_spin_locked(&ioc->sas_device_lock);
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address != sas_address)
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
+ if (sas_device->sas_address != sas_address)
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device_get(sas_device);
+ return sas_device;
+ }
+
+ return NULL;
+}
+
+/**
+ * mpt3sas_get_sdev_by_addr - sas device search
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_device_lock
*
- * This searches for sas_device based on sas_address, then return sas_device
- * object.
+ * This searches for sas_device based on sas_address & port number,
+ * then return sas_device object.
*/
struct _sas_device *
-mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+ sas_address, port);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return sas_device;
+}
+
+static struct _sas_device *
+__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_device *sas_device;
+ assert_spin_locked(&ioc->sas_device_lock);
+
list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->sas_address == sas_address)
- return sas_device;
+ if (sas_device->handle == handle)
+ goto found_device;
list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->sas_address == sas_address)
- return sas_device;
+ if (sas_device->handle == handle)
+ goto found_device;
return NULL;
+
+found_device:
+ sas_device_get(sas_device);
+ return sas_device;
}
/**
- * _scsih_sas_device_find_by_handle - sas device search
+ * mpt3sas_get_sdev_by_handle - sas device search
* @ioc: per adapter object
* @handle: sas device handle (assigned by firmware)
* Context: Calling function should acquire ioc->sas_device_lock
@@ -552,20 +853,75 @@ mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
* This searches for sas_device based on sas_address, then return sas_device
* object.
*/
-static struct _sas_device *
-_scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+struct _sas_device *
+mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_device *sas_device;
+ unsigned long flags;
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->handle == handle)
- return sas_device;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->handle == handle)
- return sas_device;
+ return sas_device;
+}
- return NULL;
+/**
+ * _scsih_display_enclosure_chassis_info - display device location info
+ * @ioc: per adapter object
+ * @sas_device: per sas device object
+ * @sdev: scsi device struct
+ * @starget: scsi target struct
+ */
+static void
+_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device, struct scsi_device *sdev,
+ struct scsi_target *starget)
+{
+ if (sdev) {
+ if (sas_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "enclosure logical id (0x%016llx), slot(%d) \n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ } else if (starget) {
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d) \n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ starget_printk(KERN_INFO, starget,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ starget_printk(KERN_INFO, starget,
+ "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ } else {
+ if (sas_device->enclosure_handle != 0)
+ ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ ioc_info(ioc, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ }
}
/**
@@ -574,7 +930,7 @@ _scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @sas_device: the sas_device object
* Context: This function will acquire ioc->sas_device_lock.
*
- * Removing object and freeing associated memory from the ioc->sas_device_list.
+ * If sas_device is on the list, remove it and decrement its reference count.
*/
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
@@ -584,10 +940,20 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!sas_device)
return;
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
+
+ _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
+ /*
+ * The lock serializes access to the list, but we still need to verify
+ * that nobody removed the entry while we were waiting on the lock.
+ */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_del(&sas_device->list);
- kfree(sas_device);
+ if (!list_empty(&sas_device->list)) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -595,8 +961,6 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
* _scsih_device_remove_by_handle - removing device object by handle
* @ioc: per adapter object
* @handle: device handle
- *
- * Return nothing.
*/
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -608,24 +972,30 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- if (sas_device)
- list_del(&sas_device->list);
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ if (sas_device) {
_scsih_remove_device(ioc, sas_device);
+ sas_device_put(sas_device);
+ }
}
/**
- * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * mpt3sas_device_remove_by_sas_address - removing device object by
+ * sas address & port number
* @ioc: per adapter object
* @sas_address: device sas_address
+ * @port: hba port entry
*
* Return nothing.
*/
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
struct _sas_device *sas_device;
unsigned long flags;
@@ -634,13 +1004,16 @@ mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
return;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_address);
- if (sas_device)
- list_del(&sas_device->list);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
+ if (sas_device) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ if (sas_device) {
_scsih_remove_device(ioc, sas_device);
+ sas_device_put(sas_device);
+ }
}
/**
@@ -657,17 +1030,26 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
+
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device_get(sas_device);
list_add_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (ioc->hide_drives) {
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
+ return;
+ }
+
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
+ sas_device->sas_address_parent, sas_device->port)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
/*
@@ -678,10 +1060,12 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
_scsih_sas_device_remove(ioc, sas_device);
}
- }
+ } else
+ clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
/**
@@ -698,17 +1082,349 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
+
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device_get(sas_device);
list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
_scsih_determine_boot_device(ioc, sas_device, 0);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->wwid == wwid)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->wwid == wwid)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_wwid - pcie device search
+ * @ioc: per adapter object
+ * @wwid: wwid
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * before returning the pcie_device object.
+ *
+ * This searches for pcie_device based on wwid, then return pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->id == id && pcie_device->channel == channel)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->id == id && pcie_device->channel == channel)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->handle == handle)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->handle == handle)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_handle - pcie device search
+ * @ioc: per adapter object
+ * @handle: Firmware device handle
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * before returning the pcie_device object.
+ *
+ * This searches for pcie_device based on handle, then return pcie_device
+ * object.
+ */
+struct _pcie_device *
+mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+/**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
+ * reported among all available NVMe drives.
+ * Minimum max_shutdown_latency will be six seconds.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if (pcie_device->shutdown_latency) {
+ if (shutdown_latency < pcie_device->shutdown_latency)
+ shutdown_latency =
+ pcie_device->shutdown_latency;
+ }
+ }
+ ioc->max_shutdown_latency = shutdown_latency;
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
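
For example (illustrative values only): with the six-second floor from IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT and two NVMe drives reporting RTD3 entry latencies of 4 and 10 seconds, ioc->max_shutdown_latency ends up as 10; if every drive reports below the floor, it stays at 6.
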
+
+/**
+ * _scsih_pcie_device_remove - remove pcie_device from list.
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ * Context: This function will acquire ioc->pcie_device_lock.
+ *
+ * If pcie_device is on the list, remove it and decrement its reference count.
+ */
+static void
+_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+ int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
+
+ if (!pcie_device)
+ return;
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ was_on_pcie_device_list = 1;
+ }
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (was_on_pcie_device_list) {
+ kfree(pcie_device->serial_number);
+ pcie_device_put(pcie_device);
+ }
+
+ /*
+ * This device's RTD3 Entry Latency matches IOC's
+ * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+ * from the available drives as current drive is getting removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
+}
+
+
+/**
+ * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ */
+static void
+_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device) {
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ was_on_pcie_device_list = 1;
+ pcie_device_put(pcie_device);
+ }
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (was_on_pcie_device_list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ }
+
+ /*
+ * This device's RTD3 Entry Latency matches IOC's
+ * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+ * from the available drives as current drive is getting removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
+}
+
+/**
+ * _scsih_pcie_device_add - add pcie_device object
+ * @ioc: per adapter object
+ * @pcie_device: pcie_device object
+ *
+ * This is added to the pcie_device_list link list.
+ */
+static void
+_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ if (pcie_device->access_status ==
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+ return;
+ }
+ if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ } else if (!pcie_device->starget) {
+ if (!ioc->is_driver_loading) {
+/*TODO-- Need to find out whether this condition will occur or not*/
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+ }
+ } else
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+}
+
+/*
+ * _scsih_pcie_device_init_add - insert pcie_device to the init list.
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ * Context: This function will acquire ioc->pcie_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->pcie_device_init_list.
+ */
+static void
+_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
+ if (pcie_device->access_status !=
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
+ _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
/**
* _scsih_raid_device_find_by_id - raid device search
* @ioc: per adapter object
@@ -737,7 +1453,7 @@ _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
}
/**
- * _scsih_raid_device_find_by_handle - raid device search
+ * mpt3sas_raid_device_find_by_handle - raid device search
* @ioc: per adapter object
* @handle: sas device handle (assigned by firmware)
* Context: Calling function should acquire ioc->raid_device_lock
@@ -745,8 +1461,8 @@ _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
* This searches for raid_device based on handle, then return raid_device
* object.
*/
-static struct _raid_device *
-_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+struct _raid_device *
+mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _raid_device *raid_device, *r;
@@ -765,7 +1481,7 @@ _scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* _scsih_raid_device_find_by_wwid - raid device search
* @ioc: per adapter object
- * @handle: sas device handle (assigned by firmware)
+ * @wwid: ?
* Context: Calling function should acquire ioc->raid_device_lock
*
* This searches for raid_device based on wwid, then return raid_device
@@ -801,9 +1517,10 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- raid_device->handle, (unsigned long long)raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ raid_device->handle, (u64)raid_device->wwid));
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -854,24 +1571,53 @@ mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
+ * @ioc: per adapter object
+ * @handle: enclosure handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for enclosure device based on handle, then returns the
+ * enclosure object.
+ */
+static struct _enclosure_node *
+mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _enclosure_node *enclosure_dev, *r;
+
+ r = NULL;
+ list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
+/**
* mpt3sas_scsih_expander_find_by_sas_address - expander device search
* @ioc: per adapter object
* @sas_address: sas address
+ * @port: hba port entry
* Context: Calling function should acquire ioc->sas_node_lock.
*
- * This searches for expander device based on sas_address, then returns the
- * sas_node object.
+ * This searches for expander device based on sas_address & port number,
+ * then returns the sas_node object.
*/
struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
- u64 sas_address)
+ u64 sas_address, struct hba_port *port)
{
- struct _sas_node *sas_expander, *r;
+ struct _sas_node *sas_expander, *r = NULL;
+
+ if (!port)
+ return r;
- r = NULL;
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
+ if (sas_expander->port != port)
+ continue;
r = sas_expander;
goto out;
}
@@ -886,8 +1632,6 @@ mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
* Context: This function will acquire ioc->sas_node_lock.
*
* Adding new object to the ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
@@ -905,7 +1649,7 @@ _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Returns 1 if end device.
+ * Return: 1 if end device.
*/
static int
_scsih_is_end_device(u32 device_info)
@@ -920,69 +1664,23 @@ _scsih_is_end_device(u32 device_info)
}
/**
- * _scsih_scsi_lookup_get - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-static struct scsi_cmnd *
-_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].scmd;
-}
-
-/**
- * _scsih_scsi_lookup_get_clear - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- * Then will derefrence the stored scmd pointer.
- */
-static inline struct scsi_cmnd *
-_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- struct scsi_cmnd *scmd;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = ioc->scsi_lookup[smid - 1].scmd;
- ioc->scsi_lookup[smid - 1].scmd = NULL;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- return scmd;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_scmd - scmd lookup
- * @ioc: per adapter object
- * @smid: system request message index
- * @scmd: pointer to scsi command object
- * Context: This function will acquire ioc->scsi_lookup_lock.
+ * _scsih_is_nvme_pciescsi_device - determines if
+ * device is a pcie nvme/scsi device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
*
- * This will search for a scmd pointer in the scsi_lookup array,
- * returning the revelent smid. A returned value of zero means invalid.
+ * Returns 1 if device is pcie device type nvme/scsi.
*/
-static u16
-_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
- *scmd)
+static int
+_scsih_is_nvme_pciescsi_device(u32 device_info)
{
- u16 smid;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- smid = 0;
- for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd == scmd) {
- smid = ioc->scsi_lookup[i].smid;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
+ if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_NVME) ||
+ ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_SCSI))
+ return 1;
+ else
+ return 0;
}
/**
@@ -999,23 +1697,19 @@ static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
int channel)
{
- u8 found;
- unsigned long flags;
- int i;
+ int smid;
+ struct scsi_cmnd *scmd;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel)) {
- found = 1;
- goto out;
- }
+ for (smid = 1;
+ smid <= ioc->shost->can_queue; smid++) {
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel)
+ return 1;
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return 0;
}
/**
@@ -1033,29 +1727,73 @@ static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
unsigned int lun, int channel)
{
- u8 found;
- unsigned long flags;
- int i;
+ int smid;
+ struct scsi_cmnd *scmd;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel &&
- ioc->scsi_lookup[i].scmd->device->lun == lun)) {
- found = 1;
- goto out;
- }
+ for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return 1;
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return 0;
}
+/**
+ * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: the smid stored scmd pointer.
+ * Then will dereference the stored scmd pointer.
+ */
+struct scsi_cmnd *
+mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *scmd = NULL;
+ struct scsiio_tracker *st;
+ Mpi25SCSIIORequest_t *mpi_request;
+ u16 tag = smid - 1;
-static void
-_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
+ if (smid > 0 &&
+ smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
+ u32 unique_tag =
+ ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /*
+ * If SCSI IO request is outstanding at driver level then
+ * DevHandle filed must be non-zero. If DevHandle is zero
+ * then it means that this smid is free at driver level,
+ * so return NULL.
+ */
+ if (!mpi_request->DevHandle)
+ return scmd;
+
+ scmd = scsi_host_find_tag(ioc->shost, unique_tag);
+ if (scmd) {
+ st = scsi_cmd_priv(scmd);
+ if (st->cb_idx == 0xFF || st->smid == 0)
+ scmd = NULL;
+ }
+ }
+ return scmd;
+}
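
The unique tag reconstructed here follows the blk-mq encoding: the hardware queue number in the upper bits (shifted by BLK_MQ_UNIQUE_TAG_BITS) OR'd with the per-queue tag, which is exactly what scsi_host_find_tag() decodes. A hypothetical round-trip sketch for a request already dispatched to the driver:

	/* Hypothetical sketch: encode and decode a blk-mq unique tag. */
	static void example_unique_tag_roundtrip(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
	{
		u32 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
		u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
		u16 tag = blk_mq_unique_tag_to_tag(unique_tag);

		/* scsi_host_find_tag() resolves the encoded value back to scmd */
		WARN_ON(scsi_host_find_tag(shost, unique_tag) != scmd);
		pr_debug("hwq %u tag %u\n", hwq, tag);
	}
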
+
+/**
+ * scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Return: queue depth.
+ */
+static int
+scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
struct Scsi_Host *shost = sdev->host;
int max_depth;
@@ -1067,7 +1805,13 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = shost->can_queue;
- /* limit max device queue for SATA to 32 */
+ /*
+ * limit max device queue for SATA to 32 if enable_sdev_max_qd
+ * is disabled.
+ */
+ if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
+ goto not_sata;
+
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
goto not_sata;
@@ -1076,12 +1820,15 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
goto not_sata;
if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
goto not_sata;
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_device_priv_data->sas_target->sas_address);
- if (sas_device && sas_device->device_info &
- MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
- max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
+ sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
+ if (sas_device) {
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
+
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
not_sata:
@@ -1090,80 +1837,54 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ sdev_printk(KERN_INFO, sdev,
+ "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported,
+ sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
+ return sdev->queue_depth;
}
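
As a concrete illustration of the clamp above: with shost->can_queue of, say, 1024 and a plain SATA disk, any requested depth above MPT3SAS_SATA_QUEUE_DEPTH (32) is reduced to 32, unless enable_sdev_max_qd is set or the adapter is a gen3.5 IOC, in which case the SATA limit is skipped.
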
/**
- * _scsih_change_queue_depth - setting device queue depth
+ * mpt3sas_scsih_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
* @qdepth: requested queue depth
- * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
- * (see include/scsi/scsi_host.h for definition)
*
- * Returns queue depth.
+ * Returns nothing.
*/
-static int
-_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+void
+mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
- if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
- _scsih_adjust_queue_depth(sdev, qdepth);
- else if (reason == SCSI_QDEPTH_QFULL)
- scsi_track_queue_full(sdev, qdepth);
- else
- return -EOPNOTSUPP;
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- if (sdev->inquiry_len > 7)
- sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \
- "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
- sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
- sdev->ordered_tags, sdev->scsi_level,
- (sdev->inquiry[7] & 2) >> 1);
+ if (ioc->enable_sdev_max_qd)
+ qdepth = shost->can_queue;
- return sdev->queue_depth;
+ scsih_change_queue_depth(sdev, qdepth);
}
/**
- * _scsih_change_queue_type - changing device queue tag type
- * @sdev: scsi device struct
- * @tag_type: requested tag type
- *
- * Returns queue tag type.
- */
-static int
-_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
- if (sdev->tagged_supported) {
- scsi_set_tag_type(sdev, tag_type);
- if (tag_type)
- scsi_activate_tcq(sdev, sdev->queue_depth);
- else
- scsi_deactivate_tcq(sdev, sdev->queue_depth);
- } else
- tag_type = 0;
-
- return tag_type;
-}
-
-
-/**
- * _scsih_target_alloc - target add routine
+ * scsih_target_alloc - target add routine
* @starget: scsi target struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-_scsih_target_alloc(struct scsi_target *starget)
+scsih_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
struct sas_rphy *rphy;
- sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL);
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
+ GFP_KERNEL);
if (!sas_target_priv_data)
return -ENOMEM;
@@ -1180,21 +1901,47 @@ _scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->handle = raid_device->handle;
sas_target_priv_data->sas_address = raid_device->wwid;
sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
+ if (ioc->is_warpdrive)
+ sas_target_priv_data->raid_device = raid_device;
raid_device->starget = starget;
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
return 0;
}
+ /* PCIe devices */
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
+ starget->channel);
+ if (pcie_device) {
+ sas_target_priv_data->handle = pcie_device->handle;
+ sas_target_priv_data->sas_address = pcie_device->wwid;
+ sas_target_priv_data->port = NULL;
+ sas_target_priv_data->pcie_dev = pcie_device;
+ pcie_device->starget = starget;
+ pcie_device->id = starget->id;
+ pcie_device->channel = starget->channel;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_PCIE_DEVICE;
+ if (pcie_device->fast_path)
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return 0;
+ }
+
/* sas/sata devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
if (sas_device) {
sas_target_priv_data->handle = sas_device->handle;
sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_target_priv_data->port = sas_device->port;
+ sas_target_priv_data->sas_dev = sas_device;
sas_device->starget = starget;
sas_device->id = starget->id;
sas_device->channel = starget->channel;
@@ -1202,7 +1949,8 @@ _scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->flags |=
MPT_TARGET_FLAGS_RAID_COMPONENT;
if (sas_device->fast_path)
- sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FASTPATH_IO;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1210,21 +1958,19 @@ _scsih_target_alloc(struct scsi_target *starget)
}
/**
- * _scsih_target_destroy - target destroy routine
+ * scsih_target_destroy - target destroy routine
* @starget: scsi target struct
- *
- * Returns nothing.
*/
static void
-_scsih_target_destroy(struct scsi_target *starget)
+scsih_target_destroy(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
- struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1242,15 +1988,43 @@ _scsih_target_destroy(struct scsi_target *starget)
goto out;
}
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc,
+ sas_target_priv_data);
+ if (pcie_device && (pcie_device->starget == starget) &&
+ (pcie_device->id == starget->id) &&
+ (pcie_device->channel == starget->channel))
+ pcie_device->starget = NULL;
+
+ if (pcie_device) {
+ /*
+ * Corresponding get() is in _scsih_target_alloc()
+ */
+ sas_target_priv_data->pcie_dev = NULL;
+ pcie_device_put(pcie_device);
+ pcie_device_put(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ goto out;
+ }
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- rphy = dev_to_rphy(starget->dev.parent);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- rphy->identify.sas_address);
+ sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
(sas_device->channel == starget->channel))
sas_device->starget = NULL;
+ if (sas_device) {
+ /*
+ * Corresponding get() is in _scsih_target_alloc()
+ */
+ sas_target_priv_data->sas_dev = NULL;
+ sas_device_put(sas_device);
+
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
out:
@@ -1259,14 +2033,14 @@ _scsih_target_destroy(struct scsi_target *starget)
}
/**
- * _scsih_slave_alloc - device add routine
+ * scsih_sdev_init - device add routine
* @sdev: scsi device struct
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-_scsih_slave_alloc(struct scsi_device *sdev)
+scsih_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
@@ -1275,9 +2049,11 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct scsi_target *starget;
struct _raid_device *raid_device;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
- sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
+ GFP_KERNEL);
if (!sas_device_priv_data)
return -ENOMEM;
@@ -1302,17 +2078,36 @@ _scsih_slave_alloc(struct scsi_device *sdev)
raid_device->sdev = sdev; /* raid is single lun */
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
+ sas_target_priv_data->sas_address);
+ if (pcie_device && (pcie_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : pcie_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ pcie_device->starget = starget;
+ }
- if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_target_priv_data->sas_address);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+ sas_target_priv_data->sas_address,
+ sas_target_priv_data->port);
if (sas_device && (sas_device->starget == NULL)) {
sdev_printk(KERN_INFO, sdev,
"%s : sas_device->starget set to starget @ %d\n",
- __func__, __LINE__);
+ __func__, __LINE__);
sas_device->starget = starget;
}
+
+ if (sas_device)
+ sas_device_put(sas_device);
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -1320,19 +2115,18 @@ _scsih_slave_alloc(struct scsi_device *sdev)
}
/**
- * _scsih_slave_destroy - device destroy routine
+ * scsih_sdev_destroy - device destroy routine
* @sdev: scsi device struct
- *
- * Returns nothing.
*/
static void
-_scsih_slave_destroy(struct scsi_device *sdev)
+scsih_sdev_destroy(struct scsi_device *sdev)
{
struct MPT3SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
if (!sdev->hostdata)
@@ -1345,12 +2139,27 @@ _scsih_slave_destroy(struct scsi_device *sdev)
shost = dev_to_shost(&starget->dev);
ioc = shost_priv(shost);
- if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc,
+ sas_target_priv_data);
+ if (pcie_device && !sas_target_priv_data->num_luns)
+ pcie_device->starget = NULL;
+
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_target_priv_data->sas_address);
+ sas_device = __mpt3sas_get_sdev_from_target(ioc,
+ sas_target_priv_data);
if (sas_device && !sas_target_priv_data->num_luns)
sas_device->starget = NULL;
+
+ if (sas_device)
+ sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -1376,16 +2185,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -1407,28 +2216,39 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
/*
* raid transport support -
* Enabled for SLES11 and newer, in older kernels the driver will panic when
- * unloading the driver followed by a load - I beleive that the subroutine
+ * unloading the driver followed by a load - I believe that the subroutine
* raid_class_release() is not cleaning up properly.
*/
/**
- * _scsih_is_raid - return boolean indicating device is raid volume
- * @dev the device struct object
+ * scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
*/
static int
-_scsih_is_raid(struct device *dev)
+scsih_is_raid(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ if (ioc->is_warpdrive)
+ return 0;
return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
}
+static int
+scsih_is_nvme(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
+}
+
/**
- * _scsih_get_resync - get raid volume resync percent complete
- * @dev the device struct object
+ * scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
*/
static void
-_scsih_get_resync(struct device *dev)
+scsih_get_resync(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
@@ -1442,6 +2262,9 @@ _scsih_get_resync(struct device *dev)
percent_complete = 0;
handle = 0;
+ if (ioc->is_warpdrive)
+ goto out;
+
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
sdev->channel);
@@ -1457,8 +2280,8 @@ _scsih_get_resync(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
percent_complete = 0;
goto out;
}
@@ -1469,15 +2292,24 @@ _scsih_get_resync(struct device *dev)
percent_complete = 0;
out:
- raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
+ raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
+ raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+ break;
+ }
}
/**
- * _scsih_get_state - get raid volume level
- * @dev the device struct object
+ * scsih_get_state - get raid volume level
+ * @dev: the device struct object
*/
static void
-_scsih_get_state(struct device *dev)
+scsih_get_state(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
@@ -1502,8 +2334,8 @@ _scsih_get_state(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -1527,16 +2359,26 @@ _scsih_get_state(struct device *dev)
break;
}
out:
- raid_set_state(mpt3sas_raid_template, dev, state);
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
+ raid_set_state(mpt2sas_raid_template, dev, state);
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
+ raid_set_state(mpt3sas_raid_template, dev, state);
+ break;
+ }
}
/**
* _scsih_set_level - set raid level
+ * @ioc: per adapter object
* @sdev: scsi device struct
* @volume_type: volume type
*/
static void
-_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
+_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_device *sdev, u8 volume_type)
{
enum raid_level level = RAID_LEVEL_UNKNOWN;
@@ -1555,16 +2397,26 @@ _scsih_set_level(struct scsi_device *sdev, u8 volume_type)
break;
}
- raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level);
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
+ raid_set_level(mpt2sas_raid_template,
+ &sdev->sdev_gendev, level);
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
+ raid_set_level(mpt3sas_raid_template,
+ &sdev->sdev_gendev, level);
+ break;
+ }
}
/**
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
- * @sas_device: the raid_device object
+ * @raid_device: the raid_device object
*
- * Returns 0 for success, else 1
+ * Return: 0 for success, else 1
*/
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
@@ -1579,28 +2431,27 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
raid_device->num_pds = num_pds;
- sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
- sizeof(Mpi2RaidVol0PhysDisk_t));
+ sz = struct_size(vol_pg0, PhysDisk, num_pds);
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
kfree(vol_pg0);
return 1;
}
@@ -1625,8 +2476,6 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
return 0;
}
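
The struct_size() change above replaces the open-coded offsetof() arithmetic with the overflow-checked helper from <linux/overflow.h>. A minimal user-space sketch of the same sizing calculation, using a made-up vol_pg0_like structure rather than the real Mpi2RaidVolPage0_t:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Made-up stand-in for Mpi2RaidVolPage0_t: a header plus a flexible
 * array of per-physical-disk entries. */
struct vol_pg0_like {
	unsigned short header;
	unsigned char num_phys_disks;
	unsigned char phys_disk[];
};

int main(void)
{
	unsigned int num_pds = 4;

	/* open-coded form removed by the patch */
	size_t old_sz = offsetof(struct vol_pg0_like, phys_disk) +
			num_pds * sizeof(unsigned char);

	/* what struct_size(vol_pg0, PhysDisk, num_pds) computes, minus
	 * the kernel helper's overflow saturation: sizeof(*p) may
	 * include trailing padding, so it is never smaller. */
	size_t new_sz = sizeof(struct vol_pg0_like) +
			num_pds * sizeof(unsigned char);

	struct vol_pg0_like *pg = calloc(1, new_sz);

	if (!pg)
		return 1;
	printf("old=%zu new=%zu\n", old_sz, new_sz);
	free(pg);
	return 0;
}
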
-
-
/**
* _scsih_enable_tlr - setting TLR flags
* @ioc: per adapter object
@@ -1655,20 +2504,22 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
}
/**
- * _scsih_slave_configure - device configure routine.
+ * scsih_sdev_configure - device configure routine.
* @sdev: scsi device struct
+ * @lim: queue limits
*
- * Returns 0 if ok. Any other return is assumed to be an error and
+ * Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-_scsih_slave_configure(struct scsi_device *sdev)
+scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _raid_device *raid_device;
unsigned long flags;
int qdepth;
@@ -1689,22 +2540,26 @@ _scsih_slave_configure(struct scsi_device *sdev)
if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (_scsih_get_volume_capabilities(ioc, raid_device)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
+ /*
+ * WARPDRIVE: Initialize the required data for Direct IO
+ */
+ mpt3sas_init_warpdrive_properties(ioc, raid_device);
/* RAID Queue Depth Support
* IS volume = underlying qdepth of drive type, either
@@ -1717,7 +2572,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
ds = "SSP";
} else {
qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
- if (raid_device->device_info &
+ if (raid_device->device_info &
MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
ds = "SATA";
else
@@ -1753,17 +2608,26 @@ _scsih_slave_configure(struct scsi_device *sdev)
break;
}
- sdev_printk(KERN_INFO, sdev,
- "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
- r_level, raid_device->handle,
- (unsigned long long)raid_device->wwid,
- raid_device->num_pds, ds);
-
+ if (!ioc->hide_ir_msg)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx),"
+ " pd_count(%d), type(%s)\n",
+ r_level, raid_device->handle,
+ (unsigned long long)raid_device->wwid,
+ raid_device->num_pds, ds);
+
+ if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
+ lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
+ sdev_printk(KERN_INFO, sdev,
+ "Set queue's max_sector to: %u\n",
+ MPT3SAS_RAID_MAX_SECTORS);
+ }
- _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
-/* raid transport support */
- _scsih_set_level(sdev, raid_device->volume_type);
+ /* raid transport support */
+ if (!ioc->is_warpdrive)
+ _scsih_set_level(ioc, sdev, raid_device->volume_type);
return 0;
}
@@ -1771,39 +2635,91 @@ _scsih_slave_configure(struct scsi_device *sdev)
if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
if (mpt3sas_config_get_volume_handle(ioc, handle,
&volume_handle)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
volume_handle, &volume_wwid)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
}
+ /* PCIe handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!pcie_device) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+
+ qdepth = ioc->max_nvme_qd;
+ ds = "NVMe";
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
+ ds, handle, (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure logical id(0x%016llx), slot(%d)\n",
+ ds,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+				"%s: enclosure level(0x%04x), "
+ "connector name( %s)\n", ds,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ if (pcie_device->nvme_mdts)
+ lim->max_hw_sectors = pcie_device->nvme_mdts / 512;
+
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+ lim->virt_boundary_mask = ioc->page_size - 1;
+ return 0;
+ }
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_device_priv_data->sas_target->sas_address);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+ sas_device_priv_data->sas_target->sas_address,
+ sas_device_priv_data->sas_target->port);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
sas_device->volume_handle = volume_handle;
sas_device->volume_wwid = volume_wwid;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
ssp_target = 1;
- ds = "SSP";
+ if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SEP) {
+ sdev_printk(KERN_INFO, sdev,
+ "set ignore_delay_remove for handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->ignore_delay_remove = 1;
+ ds = "SES";
+ } else
+ ds = "SSP";
} else {
- qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ qdepth = ioc->max_sata_qd;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
ds = "STP";
else if (sas_device->device_info &
@@ -1815,18 +2731,17 @@ _scsih_slave_configure(struct scsi_device *sdev)
"sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
ds, handle, (unsigned long long)sas_device->sas_address,
sas_device->phy, (unsigned long long)sas_device->device_name);
- sdev_printk(KERN_INFO, sdev,
- "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
- ds, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
+
+ sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (!ssp_target)
_scsih_display_sata_capabilities(ioc, handle, sdev);
- _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
if (ssp_target) {
sas_read_port_mode_page(sdev);
@@ -1837,19 +2752,17 @@ _scsih_slave_configure(struct scsi_device *sdev)
}
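
For the PCIe/NVMe branch above, nvme_mdts is a byte count while block-layer transfer limits are expressed in 512-byte sectors, and virt_boundary_mask keeps scatter/gather segments from crossing the controller's page boundary (the NVMe PRP constraint). A small stand-alone sketch of that arithmetic, with example values rather than values read from a controller:

#include <stdio.h>

int main(void)
{
	/* example values, not read from a real controller */
	unsigned int nvme_mdts = 1024 * 1024;	/* max data transfer size, bytes */
	unsigned int page_size = 4096;		/* controller page size, bytes */

	/* block layer transfer limits are counted in 512-byte sectors */
	unsigned int max_hw_sectors = nvme_mdts / 512;

	/* segments must not straddle a controller page (PRP constraint) */
	unsigned long virt_boundary_mask = page_size - 1;

	printf("max_hw_sectors=%u virt_boundary_mask=0x%lx\n",
	       max_hw_sectors, virt_boundary_mask);
	return 0;
}
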
/**
- * _scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * scsih_bios_param - fetch head, sector, cylinder info for a disk
* @sdev: scsi device struct
- * @bdev: pointer to block device context
+ * @unused: pointer to gendisk
* @capacity: device size (in 512 byte sectors)
* @params: three element array to place output:
* params[0] number of heads (max 255)
* params[1] number of sectors (max 63)
* params[2] number of cylinders
- *
- * Return nothing.
*/
static int
-_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+scsih_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int params[])
{
int heads;
@@ -1888,8 +2801,6 @@ _scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
* _scsih_response_code - translation of device response code
* @ioc: per adapter object
* @response_code: response code returned by the device
- *
- * Return nothing.
*/
static void
_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
@@ -1925,8 +2836,7 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
desc = "unknown";
break;
}
- pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
- ioc->name, response_code, desc);
+ ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
}
/**
@@ -1939,8 +2849,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
*
* The callback handler when using scsih_issue_tm.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -1951,7 +2861,6 @@ _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
return 1;
if (ioc->tm_cmds.smid != smid)
return 1;
- mpt3sas_base_flush_reply_queues(ioc);
ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (mpi_reply) {
@@ -2020,88 +2929,171 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Check whether the TM has aborted the timed out SCSI command; if the
+ * TM has aborted the IO then return SUCCESS, else return FAILED.
+ */
+static int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task)
+{
+
+ if (smid_task <= ioc->shost->can_queue) {
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!(_scsih_scsi_lookup_find_by_target(ioc,
+ id, channel)))
+ return SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
+ lun, channel)))
+ return SUCCESS;
+ break;
+ default:
+ return SUCCESS;
+ }
+ } else if (smid_task == ioc->scsih_cmds.smid) {
+ if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ } else if (smid_task == ioc->ctl_cmds.smid) {
+ if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ }
+
+ return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency
+ * issues it is possible that the interrupt for the aborted IO has not
+ * been received yet. So before returning a failure status, poll the
+ * reply descriptor pools for the reply of the timed out SCSI command.
+ * Return: FAILED if the reply for the timed out IO is not received,
+ * otherwise SUCCESS.
+ */
+static int
+scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task)
+{
+ int rc;
+
+ rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+ if (rc == SUCCESS)
+ return rc;
+
+ ioc_info(ioc,
+ "Poll ReplyDescriptor queues for completion of"
+ " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
+ smid_task, type, handle);
+
+ /*
+ * Due to interrupt latency issues, driver may receive interrupt for
+ * TM first and then for aborted SCSI IO command. So, poll all the
+ * ReplyDescriptor pools before returning the FAILED status to SML.
+ */
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 1);
+ mpt3sas_base_unmask_interrupts(ioc);
+
+ return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+}
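
The post-processing helper above only declares FAILED after one forced poll of the reply queues, since the completion interrupt for the aborted IO may simply be late. A minimal user-space sketch of that check/poll/re-check shape; io_still_outstanding() and poll_reply_queues() are invented stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

static bool io_still_outstanding(void)
{
	return false;
}

static void poll_reply_queues(void)
{
	/* mask interrupts, drain completions by polling, unmask */
}

/* Same shape as scsih_tm_post_processing(): only report FAILED after
 * giving a possibly-late completion one chance to be observed. */
static int tm_post_processing(void)
{
	if (!io_still_outstanding())
		return 0;			/* SUCCESS */

	poll_reply_queues();

	return io_still_outstanding() ? -1 : 0;	/* FAILED : SUCCESS */
}

int main(void)
{
	printf("%s\n", tm_post_processing() ? "FAILED" : "SUCCESS");
	return 0;
}
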
+
+/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
- * @device_handle: device handle
+ * @handle: device handle
* @channel: the channel assigned by the OS
* @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
- * @serial_number: the serial_number from scmd
- * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * @tr_method: Target Reset Method
* Context: user
*
* A generic API for sending task management requests to firmware.
*
* The callback index is set inside `ioc->tm_cb_idx`.
+ * The caller is responsible to check for outstanding commands.
*
- * Return SUCCESS or FAILED.
+ * Return: SUCCESS or FAILED.
*/
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
- unsigned long serial_number, enum mutex_type m_type)
+ uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
+ u8 timeout, u8 tr_method)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi25SCSIIORequest_t *request;
u16 smid = 0;
u32 ioc_state;
- unsigned long timeleft;
- struct scsiio_tracker *scsi_lookup = NULL;
int rc;
+ u8 issue_reset = 0;
+
+ lockdep_assert_held(&ioc->tm_cmds.mutex);
- if (m_type == TM_MUTEX_ON)
- mutex_lock(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
- pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
- __func__, ioc->name);
- rc = FAILED;
- goto err_out;
+ ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
+ return FAILED;
}
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
- rc = FAILED;
- goto err_out;
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
+ return FAILED;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
- rc = (!rc) ? SUCCESS : FAILED;
- goto err_out;
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
- rc = (!rc) ? SUCCESS : FAILED;
- goto err_out;
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
}
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = FAILED;
- goto err_out;
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
+ return FAILED;
}
- if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
-
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
- ioc->name, handle, type, smid_task));
+ dtmprintk(ioc,
+ ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+ handle, type, smid_task, timeout, tr_method));
ioc->tm_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
@@ -2110,35 +3102,38 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ mpi_request->MsgFlags = tr_method;
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
- timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ ioc->put_smid_hi_priority(ioc, smid, msix_task);
+ wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- _debug_dump_mf(mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4);
- if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
+ if (issue_reset) {
+ rc = mpt3sas_base_hard_reset_handler(ioc,
+ FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
- ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
- mpt3sas_scsih_clear_tm_flag(ioc, handle);
- goto err_out;
+ goto out;
}
}
+ /* sync IRQs in case those were busy during flush. */
+ mpt3sas_base_sync_reply_irqs(ioc, 0);
+
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
mpi_reply = ioc->tm_cmds.reply;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
- "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dtmprintk(ioc,
+ ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
if (mpi_reply->IOCStatus)
@@ -2150,23 +3145,32 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
switch (type) {
case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
rc = SUCCESS;
- if (scsi_lookup->scmd == NULL)
+ /*
+		 * If the DevHandle field in smid_task's entry of the request
+		 * pool doesn't match the device handle on which this task
+		 * abort TM was received, it means that the TM has
+		 * successfully aborted the timed out command: smid_task's
+		 * entry in the request pool is memset to zero once the timed
+		 * out command is returned to the SML. If the command was not
+		 * aborted then smid_task's entry won't be cleared, it will
+		 * still have the same DevHandle value on which this task
+		 * abort TM was received, and the driver returns FAILED.
+ */
+ request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+ if (le16_to_cpu(request->DevHandle) != handle)
break;
+
+		ioc_info(ioc, "Task abort tm failed: handle(0x%04x), "
+		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+ handle, timeout, tr_method, smid_task, msix_task);
rc = FAILED;
break;
case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
+ rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+ type, smid_task);
break;
case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
rc = SUCCESS;
@@ -2176,17 +3180,24 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
break;
}
+out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
- if (m_type == TM_MUTEX_ON)
- mutex_unlock(&ioc->tm_cmds.mutex);
-
return rc;
+}
- err_out:
- if (m_type == TM_MUTEX_ON)
- mutex_unlock(&ioc->tm_cmds.mutex);
- return rc;
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method)
+{
+ int ret;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+ smid_task, msix_task, timeout, tr_method);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return ret;
}
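
mpt3sas_scsih_issue_tm() now assumes tm_cmds.mutex is already held (enforced with lockdep_assert_held()), and mpt3sas_scsih_issue_locked_tm() is the convenience wrapper that takes the mutex itself. A rough pthread-based sketch of that locked/unlocked split, with illustrative names only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold tm_mutex; in the driver this contract is checked
 * with lockdep_assert_held(&ioc->tm_cmds.mutex). */
static int issue_tm(int handle)
{
	printf("TM sent to handle 0x%04x\n", handle);
	return 0;
}

/* Convenience wrapper that owns the locking, mirroring
 * mpt3sas_scsih_issue_locked_tm(). */
static int issue_locked_tm(int handle)
{
	int ret;

	pthread_mutex_lock(&tm_mutex);
	ret = issue_tm(handle);
	pthread_mutex_unlock(&tm_mutex);
	return ret;
}

int main(void)
{
	return issue_locked_tm(0x0011);
}
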
/**
@@ -2202,12 +3213,16 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *priv_target = starget->hostdata;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
unsigned long flags;
char *device_str = NULL;
if (!priv_target)
return;
- device_str = "volume";
+ if (ioc->hide_ir_msg)
+ device_str = "WarpDrive";
+ else
+ device_str = "volume";
scsi_print_command(scmd);
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
@@ -2215,10 +3230,34 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
"%s handle(0x%04x), %s wwid(0x%016llx)\n",
device_str, priv_target->handle,
device_str, (unsigned long long)priv_target->sas_address);
+
+ } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
+ if (pcie_device) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
+ pcie_device->handle,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ starget_printk(KERN_INFO, starget,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ pcie_device_put(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
} else {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- priv_target->sas_address);
+ sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
if (priv_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
@@ -2233,47 +3272,54 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
sas_device->handle,
(unsigned long long)sas_device->sas_address,
sas_device->phy);
- starget_printk(KERN_INFO, starget,
- "enclosure_logical_id(0x%016llx), slot(%d)\n",
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
+
+ _scsih_display_enclosure_chassis_info(NULL, sas_device,
+ NULL, starget);
+
+ sas_device_put(sas_device);
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
}
/**
- * _scsih_abort - eh threads main abort routine
+ * scsih_abort - eh threads main abort routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
-_scsih_abort(struct scsi_cmnd *scmd)
+scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- u16 smid;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
- sdev_printk(KERN_INFO, scmd->device,
- "attempting task abort! scmd(%p)\n", scmd);
+ u8 timeout = 30;
+ struct _pcie_device *pcie_device = NULL;
+	sdev_printk(KERN_INFO, scmd->device, "attempting task abort! "
+	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
+ scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
- /* search for the command */
- smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
- if (!smid) {
+ /* check for completed command */
+ if (st == NULL || st->cb_idx == 0xFF) {
+ sdev_printk(KERN_INFO, scmd->device, "No reference found at "
+ "driver, assuming scmd(0x%p) might have completed\n", scmd);
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -2291,43 +3337,57 @@ _scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd->serial_number, TM_MUTEX_ON);
-
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
+ timeout = ioc->nvme_abort_timeout;
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, timeout, 0);
+ /* Command must be cleared after abort */
+ if (r == SUCCESS && st->cb_idx != 0xFF)
+ r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
return r;
}
/**
- * _scsih_dev_reset - eh threads main device reset routine
+ * scsih_dev_reset - eh threads main device reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
-_scsih_dev_reset(struct scsi_cmnd *scmd)
+scsih_dev_reset(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- struct _sas_device *sas_device;
- unsigned long flags;
+ struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
u16 handle;
+ u8 tr_method = 0;
+ u8 tr_timeout = 30;
int r;
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
+
sdev_printk(KERN_INFO, scmd->device,
- "attempting device reset! scmd(%p)\n", scmd);
+ "attempting device reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -2336,12 +3396,10 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
handle = 0;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc,
- sas_device_priv_data->sas_target->handle);
+ sas_device = mpt3sas_get_sdev_from_target(ioc,
+ target_priv_data);
if (sas_device)
handle = sas_device->volume_handle;
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
} else
handle = sas_device_priv_data->sas_target->handle;
@@ -2351,44 +3409,65 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
- TM_MUTEX_ON);
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
+ tr_timeout = pcie_device->reset_timeout;
+ tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+ } else
+ tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
+ tr_timeout, tr_method);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && scsi_device_busy(scmd->device))
+ r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ if (sas_device)
+ sas_device_put(sas_device);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
return r;
}
/**
- * _scsih_target_reset - eh threads main target reset routine
+ * scsih_target_reset - eh threads main target reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
-_scsih_target_reset(struct scsi_cmnd *scmd)
+scsih_target_reset(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- struct _sas_device *sas_device;
- unsigned long flags;
+ struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
u16 handle;
+ u8 tr_method = 0;
+ u8 tr_timeout = 30;
int r;
struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
- starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "attempting target reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
- scmd);
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ ioc->remove_host) {
+ starget_printk(KERN_INFO, starget,
+ "target been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
r = SUCCESS;
goto out;
}
@@ -2397,12 +3476,10 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
handle = 0;
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_RAID_COMPONENT) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc,
- sas_device_priv_data->sas_target->handle);
+ sas_device = mpt3sas_get_sdev_from_target(ioc,
+ target_priv_data);
if (sas_device)
handle = sas_device->volume_handle;
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
} else
handle = sas_device_priv_data->sas_target->handle;
@@ -2412,38 +3489,59 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30, 0, TM_MUTEX_ON);
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
+ tr_timeout = pcie_device->reset_timeout;
+ tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+ } else
+ tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
+ tr_timeout, tr_method);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&starget->target_busy))
+ r = FAILED;
out:
- starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ if (sas_device)
+ sas_device_put(sas_device);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
return r;
}
/**
- * _scsih_host_reset - eh threads main host reset routine
+ * scsih_host_reset - eh threads main host reset routine
* @scmd: pointer to scsi command object
*
- * Returns SUCCESS if command aborted else FAILED
+ * Return: SUCCESS if command aborted else FAILED
*/
static int
-_scsih_host_reset(struct scsi_cmnd *scmd)
+scsih_host_reset(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
- ioc->name, scmd);
+ ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
scsi_print_command(scmd);
- retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ if (ioc->is_driver_loading || ioc->remove_host) {
+ ioc_info(ioc, "Blocking the host reset\n");
+ r = FAILED;
+ goto out;
+ }
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
- pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
- ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+out:
+ ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
+ r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
}
@@ -2456,8 +3554,6 @@ _scsih_host_reset(struct scsi_cmnd *scmd)
*
* This adds the firmware event object into link list, then queues it up to
* be processed from user context.
- *
- * Return nothing.
*/
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -2468,33 +3564,34 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
return;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ fw_event_work_get(fw_event);
INIT_LIST_HEAD(&fw_event->list);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
INIT_WORK(&fw_event->work, _firmware_event_work);
+ fw_event_work_get(fw_event);
queue_work(ioc->firmware_event_thread, &fw_event->work);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
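
_scsih_fw_event_add() above takes two references on the fw_event: one owned by fw_event_list and one owned by the queued work item; they are dropped by the list-removal paths and by the work handler respectively. A toy user-space sketch of that ownership model (the driver wraps a kref; the names below are invented):

#include <stdio.h>
#include <stdlib.h>

struct fw_event_like {
	int refcount;
};

static void event_get(struct fw_event_like *e)
{
	e->refcount++;
}

static void event_put(struct fw_event_like *e)
{
	if (--e->refcount == 0)
		free(e);
}

int main(void)
{
	struct fw_event_like *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->refcount = 1;	/* allocator's reference */
	event_get(e);		/* reference owned by the event list */
	event_get(e);		/* reference owned by the queued work */

	event_put(e);		/* list removal drops the list reference */
	event_put(e);		/* work handler drops its reference when done */
	event_put(e);		/* caller drops its own reference after add */
	return 0;
}
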
/**
- * _scsih_fw_event_free - delete fw_event
+ * _scsih_fw_event_del_from_list - delete fw_event from the list
* @ioc: per adapter object
* @fw_event: object describing the event
* Context: This function will acquire ioc->fw_event_lock.
*
- * This removes firmware event object from link list, frees associated memory.
- *
- * Return nothing.
+ * If the fw_event is on the fw_event_list, remove it and do a put.
*/
static void
-_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
+_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
*fw_event)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
- list_del(&fw_event->list);
- kfree(fw_event->event_data);
- kfree(fw_event);
+ if (!list_empty(&fw_event->list)) {
+ list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
+ }
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2503,67 +3600,78 @@ _scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
* mpt3sas_send_trigger_data_event - send event for processing trigger data
* @ioc: per adapter object
* @event_data: trigger event data
- *
- * Return nothing.
*/
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
struct fw_event_work *fw_event;
+ u16 sz;
if (ioc->is_driver_loading)
return;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ sz = sizeof(*event_data);
+ fw_event = alloc_fw_event_work(sz);
if (!fw_event)
return;
- fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC);
- if (!fw_event->event_data)
- return;
fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
fw_event->ioc = ioc;
memcpy(fw_event->event_data, event_data, sizeof(*event_data));
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
}
/**
* _scsih_error_recovery_delete_devices - remove devices not responding
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (ioc->is_driver_loading)
- return;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
}
/**
* mpt3sas_port_enable_complete - port enable completed (fake event)
* @ioc: per adapter object
- *
- * Return nothing.
*/
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
+}
+
+static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct fw_event_work *fw_event = NULL;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ if (!list_empty(&ioc->fw_event_list)) {
+ fw_event = list_first_entry(&ioc->fw_event_list,
+ struct fw_event_work, list);
+ list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+
+ return fw_event;
}
/**
@@ -2573,23 +3681,144 @@ mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
*
- * Return nothing.
+ * Context: task, can sleep
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
- struct fw_event_work *fw_event, *next;
+ struct fw_event_work *fw_event;
- if (list_empty(&ioc->fw_event_list) ||
- !ioc->firmware_event_thread || in_interrupt())
+ if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
+ !ioc->firmware_event_thread)
return;
+ /*
+	 * Mark the currently running event as ignored, so that
+	 * it exits quickly. As a diag reset has occurred, there is
+	 * no point in processing the remaining stale event data
+	 * entries.
+ */
+ if (ioc->shost_recovery && ioc->current_event)
+ ioc->current_event->ignore = 1;
+
+ ioc->fw_events_cleanup = 1;
+ while ((fw_event = dequeue_next_fw_event(ioc)) ||
+ (fw_event = ioc->current_event)) {
- list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
- if (cancel_delayed_work(&fw_event->delayed_work)) {
- _scsih_fw_event_free(ioc, fw_event);
+ /*
+		 * Don't call cancel_work_sync() for the current_event unless
+		 * it is MPT3SAS_REMOVE_UNRESPONDING_DEVICES; otherwise we
+		 * may observe a deadlock if a hard reset is issued as part
+		 * of processing the current_event.
+		 *
+		 * The original logic of cleaning up the current_event was
+		 * added to handle back to back host resets issued by the
+		 * user, i.e. during back to back host resets the driver used
+		 * to process two instances of the
+		 * MPT3SAS_REMOVE_UNRESPONDING_DEVICES event back to back,
+		 * which made the driver unregister the devices from the SML.
+ */
+
+ if (fw_event == ioc->current_event &&
+ ioc->current_event->event !=
+ MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
+ ioc->current_event = NULL;
continue;
}
- fw_event->cancel_pending_work = 1;
+
+ /*
+ * Driver has to clear ioc->start_scan flag when
+ * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
+ * otherwise scsi_scan_host() API waits for the
+ * 5 minute timer to expire. If we exit from
+ * scsi_scan_host() early then we can issue the
+ * new port enable request as part of current diag reset.
+ */
+ if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ ioc->start_scan = 0;
+ }
+
+ /*
+ * Wait on the fw_event to complete. If this returns 1, then
+ * the event was never executed, and we need a put for the
+ * reference the work had on the fw_event.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from _firmware_event_work()
+ */
+ if (cancel_work_sync(&fw_event->work))
+ fw_event_work_put(fw_event);
+
+ }
+ ioc->fw_events_cleanup = 0;
+}
+
+/**
+ * _scsih_internal_device_block - block the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is blocked without error; if not,
+ * print an error.
+ */
+static void
+_scsih_internal_device_block(struct scsi_device *sdev,
+ struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+ int r = 0;
+
+ sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 1;
+
+ r = scsi_internal_device_block_nowait(sdev);
+ if (r == -EINVAL)
+ sdev_printk(KERN_WARNING, sdev,
+ "device_block failed with return(%d) for handle(0x%04x)\n",
+ r, sas_device_priv_data->sas_target->handle);
+}
+
+/**
+ * _scsih_internal_device_unblock - unblock the sdev device
+ * @sdev: per device object
+ * @sas_device_priv_data: per device driver private data
+ *
+ * Make sure the device is unblocked without error; if not, retry
+ * by blocking and then unblocking it.
+ */
+
+static void
+_scsih_internal_device_unblock(struct scsi_device *sdev,
+ struct MPT3SAS_DEVICE *sas_device_priv_data)
+{
+ int r = 0;
+
+ sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
+ "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 0;
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (r == -EINVAL) {
+ /* The device has been set to SDEV_RUNNING by SD layer during
+ * device addition but the request queue is still stopped by
+ * our earlier block call. We need to perform a block again
+ * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
+
+ sdev_printk(KERN_WARNING, sdev,
+ "device_unblock failed with return(%d) for handle(0x%04x) "
+ "performing a block followed by an unblock\n",
+ r, sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->block = 1;
+ r = scsi_internal_device_block_nowait(sdev);
+ if (r)
+ sdev_printk(KERN_WARNING, sdev, "retried device_block "
+ "failed with return(%d) for handle(0x%04x)\n",
+ r, sas_device_priv_data->sas_target->handle);
+
+ sas_device_priv_data->block = 0;
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (r)
+ sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
+ " failed with return(%d) for handle(0x%04x)\n",
+ r, sas_device_priv_data->sas_target->handle);
}
}
@@ -2612,11 +3841,10 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
if (!sas_device_priv_data->block)
continue;
- sas_device_priv_data->block = 0;
dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
"device_running, handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle));
- scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ _scsih_internal_device_unblock(sdev, sas_device_priv_data);
}
}
@@ -2624,36 +3852,38 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_ublock_io_device - prepare device to be deleted
* @ioc: per adapter object
- * @sas_addr: sas address
+ * @sas_address: sas address
+ * @port: hba port entry
*
* unblock then put device in offline state
*/
static void
-_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct hba_port *port)
{
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)
continue;
- if (sas_device_priv_data->block) {
- sas_device_priv_data->block = 0;
- scsi_internal_device_unblock(sdev, SDEV_RUNNING);
- }
+ if (sas_device_priv_data->sas_target->port != port)
+ continue;
+ if (sas_device_priv_data->block)
+ _scsih_internal_device_unblock(sdev,
+ sas_device_priv_data);
}
}
/**
* _scsih_block_io_all_device - set the device state to SDEV_BLOCK
* @ioc: per adapter object
- * @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2667,10 +3897,13 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
continue;
if (sas_device_priv_data->block)
continue;
- sas_device_priv_data->block = 1;
- scsi_internal_device_block(sdev);
- sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
+ if (sas_device_priv_data->ignore_delay_remove) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip device_block for SES handle(0x%04x)\n",
+ __func__, sas_device_priv_data->sas_target->handle);
+ continue;
+ }
+ _scsih_internal_device_block(sdev, sas_device_priv_data);
}
}
@@ -2679,13 +3912,16 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @handle: device handle
*
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
*/
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
+ struct _sas_device *sas_device;
+
+ sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
@@ -2695,11 +3931,19 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
continue;
if (sas_device_priv_data->block)
continue;
- sas_device_priv_data->block = 1;
- scsi_internal_device_block(sdev);
- sdev_printk(KERN_INFO, sdev,
- "device_blocked, handle(0x%04x)\n", handle);
+ if (sas_device && sas_device->pend_sas_rphy_add)
+ continue;
+ if (sas_device_priv_data->ignore_delay_remove) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip device_block for SES handle(0x%04x)\n",
+ __func__, sas_device_priv_data->sas_target->handle);
+ continue;
+ }
+ _scsih_internal_device_block(sdev, sas_device_priv_data);
}
+
+ if (sas_device)
+ sas_device_put(sas_device);
}
/**
@@ -2728,12 +3972,14 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device =
- mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
- if (sas_device)
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
+ if (sas_device) {
set_bit(sas_device->handle,
- ioc->blocking_handles);
+ ioc->blocking_handles);
+ sas_device_put(sas_device);
+ }
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
}
@@ -2747,7 +3993,8 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
SAS_FANOUT_EXPANDER_DEVICE) {
expander_sibling =
mpt3sas_scsih_expander_find_by_sas_address(
- ioc, mpt3sas_port->remote_identify.sas_address);
+ ioc, mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
_scsih_block_io_to_children_attached_to_ex(ioc,
expander_sibling);
}
@@ -2782,6 +4029,33 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_block_io_to_pcie_children_attached_directly
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * directly attached during device pull/reconnect.
+ */
+static void
+_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ if (reason_code ==
+ MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+/**
* _scsih_tm_tr_send - send task management request
* @ioc: per adapter object
* @handle: device handle
@@ -2801,31 +4075,27 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
u16 smid;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
u64 sas_address = 0;
unsigned long flags;
struct _tr_list *delayed_tr;
u32 ioc_state;
+ u8 tr_method = 0;
+ struct hba_port *port = NULL;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed: handle(0x%04x)\n",
- __func__, ioc->name, handle));
- return;
- } else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ if (ioc->pci_error_recovery) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, handle));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+ __func__, handle));
return;
}
@@ -2833,22 +4103,64 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (test_bit(handle, ioc->pd_handles))
return;
+ clear_bit(handle, ioc->pend_os_device_add);
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device && sas_device->starget &&
sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
sas_address = sas_device->sas_address;
+ port = sas_device->port;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
+ if (!sas_device) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && pcie_device->starget &&
+ pcie_device->starget->hostdata) {
+ sas_target_priv_data = pcie_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = pcie_device->wwid;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(
+ pcie_device->device_info))))
+ tr_method =
+ MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
+ else
+ tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+ }
if (sas_target_priv_data) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle,
- (unsigned long long)sas_address));
- _scsih_ublock_io_device(ioc, sas_address);
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (u64)sas_address));
+ if (sas_device) {
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot));
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name));
+ } else if (pcie_device) {
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+ }
+ _scsih_ublock_io_device(ioc, sas_address, port);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -2856,27 +4168,34 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (!smid) {
delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
if (!delayed_tr)
- return;
+ goto out;
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- return;
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
+ goto out;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpi_request->MsgFlags = tr_method;
+ set_bit(handle, ioc->device_remove_in_progress);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+
+out:
+ if (sas_device)
+ sas_device_put(sas_device);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
}
/**
@@ -2892,8 +4211,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* handshake protocol with controller firmware.
* It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -2906,67 +4225,104 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
u32 ioc_state;
+ struct _sc_list *delayed_sc;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed\n", __func__, ioc->name));
- return 1;
- } else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n", __func__,
- ioc->name));
+ if (ioc->pci_error_recovery) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return 1;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n", __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
return 0;
}
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- return 1;
- }
-
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid_sas_ctrl,
- ioc->tm_sas_control_cb_idx));
+ delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
+ if (!delayed_sc)
+ return _scsih_check_for_pending_tm(ioc, smid);
+ INIT_LIST_HEAD(&delayed_sc->list);
+ delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
+ handle));
+ return _scsih_check_for_pending_tm(ioc, smid);
+ }
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+ ioc->put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
+/**
+ * _scsih_allow_scmd_to_device - check whether the scmd can be
+ *				 issued to the IOC or not.
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ *
+ * Return: true if the scmd can be issued to the IOC, otherwise false.
+ */
+inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
+{
+
+ if (ioc->pci_error_recovery)
+ return false;
+
+ if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
+ if (ioc->remove_host)
+ return false;
+
+ return true;
+ }
+
+ if (ioc->remove_host) {
+
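+		/*
+		 * While the host is being removed, allow only cache flush
+		 * and start/stop commands through to the IOC.
+		 */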
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
/**
* _scsih_sas_control_complete - completion routine
@@ -2980,8 +4336,8 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* This code is part of the code to initiate the device removal
* handshake protocol with controller firmware.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -2991,17 +4347,21 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (likely(mpi_reply)) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_complete:handle(0x%04x), (open) "
- "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
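+		/*
+		 * On success the firmware has completed the REMOVE_DEVICE
+		 * operation, so clear the device-removal-in-progress flag
+		 * for this handle.
+		 */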
+ if (le16_to_cpu(mpi_reply->IOCStatus) ==
+ MPI2_IOCSTATUS_SUCCESS) {
+ clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+ ioc->device_remove_in_progress);
+ }
} else {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
- return 1;
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
/**
@@ -3021,11 +4381,10 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
u16 smid;
struct _tr_list *delayed_tr;
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ if (ioc->pci_error_recovery) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return;
}
@@ -3037,22 +4396,21 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
return;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_volume_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_volume_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ ioc->put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3063,8 +4421,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @reply: reply message frame(lower 32bit addr)
* Context: interrupt time.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -3075,39 +4433,171 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2SCSITaskManagementReply_t *mpi_reply =
mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle, le16_to_cpu(mpi_reply->DevHandle),
+ smid));
return 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
return _scsih_check_for_pending_tm(ioc, smid);
}
+/**
+ * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @event: Event ID
+ * @event_context: used to track events uniquely
+ *
+ * Context - processed in interrupt context.
+ */
+static void
+_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
+ U32 event_context)
+{
+ Mpi2EventAckRequest_t *ack_request;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
+
+ /* Without releasing the smid just update the
+ * call back index and reuse the same smid for
+ * processing this delayed request
+ */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+ le16_to_cpu(event), smid, ioc->base_cb_idx));
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = event;
+ ack_request->EventContext = event_context;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ ioc->put_smid_default(ioc, smid);
+}
+
+/**
+ * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
+ * sas_io_unit_ctrl messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Context - processed in interrupt context.
+ */
+static void
+_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 handle)
+{
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u32 ioc_state;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host has been removed\n",
+ __func__));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
+ return;
+ }
+
+ /* Without releasing the smid just update the
+ * call back index and reuse the same smid for
+ * processing this delayed request
+ */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ ioc->put_smid_default(ioc, smid);
+}
+
+/**
+ * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Context: Executed in interrupt context
+ *
+ * This will check delayed internal messages list, and process the
+ * next request.
+ *
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _sc_list *delayed_sc;
+ struct _event_ack_list *delayed_event_ack;
+
+ if (!list_empty(&ioc->delayed_event_ack_list)) {
+ delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
+ struct _event_ack_list, list);
+ _scsih_issue_delayed_event_ack(ioc, smid,
+ delayed_event_ack->Event, delayed_event_ack->EventContext);
+ list_del(&delayed_event_ack->list);
+ kfree(delayed_event_ack);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_sc_list)) {
+ delayed_sc = list_entry(ioc->delayed_sc_list.next,
+ struct _sc_list, list);
+ _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
+ delayed_sc->handle);
+ list_del(&delayed_sc->list);
+ kfree(delayed_sc);
+ return 0;
+ }
+ return 1;
+}
/**
* _scsih_check_for_pending_tm - check for pending task management
@@ -3117,8 +4607,8 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* This will check delayed target reset list, and feed the
 * next request.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -3158,8 +4648,6 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* This handles the case where driver receives multiple expander
* add and delete events in a single shot. When there is a delete event
* the routine will void any pending add events waiting in the event queue.
- *
- * Return nothing.
*/
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -3214,15 +4702,88 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
fw_event->ignore)
continue;
- local_event_data = fw_event->event_data;
+ local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
if (local_event_data->ExpStatus ==
MPI2_EVENT_SAS_TOPO_ES_ADDED ||
local_event_data->ExpStatus ==
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
expander_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag\n"));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_check_pcie_topo_remove_events - sanity check on topo
+ * events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This handles the case where driver receives multiple switch
+ * or device add and delete events in a single shot. When there
+ * is a delete event the routine will void any pending add
+ * events waiting in the event queue.
+ */
+static void
+_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle, switch_handle;
+
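+	/* Send a target reset to each attached device reported as not responding. */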
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
+ if (!switch_handle) {
+ _scsih_block_io_to_pcie_children_attached_directly(
+ ioc, event_data);
+ return;
+ }
+	/* TODO: We are not supporting cascaded PCIe Switch removal yet */
+ if ((event_data->SwitchStatus
+ == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
+ (event_data->SwitchStatus ==
+ MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
+ _scsih_block_io_to_pcie_children_attached_directly(
+ ioc, event_data);
+
+ if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data =
+ (Mpi26EventDataPCIeTopologyChangeList_t *)
+ fw_event->event_data;
+ if (local_event_data->SwitchStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->SwitchStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
+ switch_handle) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag for switch event\n"));
fw_event->ignore = 1;
}
}
@@ -3245,16 +4806,15 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
unsigned long flags;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
if (raid_device && raid_device->starget &&
raid_device->starget->hostdata) {
sas_target_priv_data =
raid_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), "
- "wwid(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)raid_device->wwid));
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -3291,8 +4851,6 @@ _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
* volume has been deleted or removed. When the target reset is sent
* to volume, the PD target resets need to be queued to start upon
* completion of the volume target reset.
- *
- * Return nothing.
*/
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
@@ -3306,6 +4864,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
a = 0;
b = 0;
+ if (ioc->is_warpdrive)
+ return;
+
/* Volume Resets for Deleted or Removed */
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
for (i = 0; i < event_data->NumElements; i++, element++) {
@@ -3355,9 +4916,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
} else
_scsih_tm_tr_send(ioc, handle);
}
@@ -3373,8 +4934,6 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
* This will handle the case when the cable connected to entire volume is
* pulled. We will take care of setting the deleted flag so normal IO will
* not be sent.
- *
- * Return nothing.
*/
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
@@ -3392,95 +4951,131 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_temp_threshold_events - display temperature threshold exceeded events
+ * @ioc: per adapter object
+ * @event_data: the temp threshold event data
+ * Context: interrupt time.
+ */
+static void
+_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataTemperature_t *event_data)
+{
+ u32 doorbell;
+ if (ioc->temp_sensors_count >= event_data->SensorNum) {
+ ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
+ le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
+ le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
+ le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
+ le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
+ event_data->SensorNum);
+ ioc_err(ioc, "Current Temp In Celsius: %d\n",
+ event_data->CurrentTemperature);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
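+			/*
+			 * The IOC may have gone to FAULT or COREDUMP state;
+			 * if so, print the corresponding code.
+			 */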
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ }
+ }
+ }
+}
+
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
+{
+ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+ return 0;
+
+ if (pending)
+ return test_and_set_bit(0, &priv->ata_command_pending);
+
+ clear_bit(0, &priv->ata_command_pending);
+ return 0;
+}
+
+/**
* _scsih_flush_running_cmds - completing outstanding commands.
* @ioc: per adapter object
*
* The flushing out of all pending scmd commands following host reset,
* where all IO is dropped to the floor.
- *
- * Return nothing.
*/
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 smid;
- u16 count = 0;
+ int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
- mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
+ st = scsi_cmd_priv(scmd);
+ mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
- if (ioc->pci_error_recovery)
+ if (ioc->pci_error_recovery || ioc->remove_host)
scmd->result = DID_NO_CONNECT << 16;
else
scmd->result = DID_RESET << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
- ioc->name, count));
+ dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
/**
* _scsih_setup_eedp - setup MPI request for EEDP transfer
* @ioc: per adapter object
* @scmd: pointer to scsi command object
- * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @mpi_request: pointer to the SCSI_IO request message frame
*
* Supporting protection 1 and 3.
- *
- * Returns nothing
*/
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- Mpi2SCSIIORequest_t *mpi_request)
+ Mpi25SCSIIORequest_t *mpi_request)
{
u16 eedp_flags;
- unsigned char prot_op = scsi_get_prot_op(scmd);
- unsigned char prot_type = scsi_get_prot_type(scmd);
Mpi25SCSIIORequest_t *mpi_request_3v =
(Mpi25SCSIIORequest_t *)mpi_request;
- if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
- return;
-
- if (prot_op == SCSI_PROT_READ_STRIP)
+ switch (scsi_get_prot_op(scmd)) {
+ case SCSI_PROT_READ_STRIP:
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
- else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ break;
+ case SCSI_PROT_WRITE_INSERT:
eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
- else
+ break;
+ default:
return;
+ }
- switch (prot_type) {
- case SCSI_PROT_DIF_TYPE1:
- case SCSI_PROT_DIF_TYPE2:
-
- /*
- * enable ref/guard checking
- * auto increment ref tag
- */
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(scsi_get_lba(scmd));
- break;
+ if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- case SCSI_PROT_DIF_TYPE3:
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
- /*
- * enable guard checking
- */
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
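+	/* Auto-increment the primary reference tag, seeded from the command's ref tag. */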
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
- break;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
}
- mpi_request_3v->EEDPBlockSize =
- cpu_to_le16(scmd->device->sector_size);
+ mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
+
+ if (ioc->is_gen35_ioc)
+ eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
@@ -3488,8 +5083,6 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
* _scsih_eedp_error_handling - return sense code for EEDP errors
* @scmd: pointer to scsi command object
* @ioc_status: ioc status
- *
- * Returns nothing
*/
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
@@ -3510,51 +5103,49 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
ascq = 0x00;
break;
}
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
- ascq);
- scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
+ set_host_byte(scmd, DID_ABORT);
}
-
/**
- * _scsih_qcmd_lck - main scsi request entry point
+ * scsih_qcmd - main scsi request entry point
+ * @shost: SCSI host pointer
* @scmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* The callback index is set inside `ioc->scsi_io_cb_idx`.
*
- * Returns 0 on success. If there's a failure, return either:
+ * Return: 0 on success. If there's a failure, return either:
* SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
* SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
*/
static int
-_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
- struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
- Mpi2SCSIIORequest_t *mpi_request;
+ struct _raid_device *raid_device;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ int class;
+ Mpi25SCSIIORequest_t *mpi_request;
+ struct _pcie_device *pcie_device = NULL;
u32 mpi_control;
u16 smid;
u16 handle;
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
-#endif
- scmd->scsi_done = done;
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
- if (ioc->pci_error_recovery || ioc->remove_host) {
+ if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
@@ -3562,26 +5153,49 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
/* invalid device handle */
handle = sas_target_priv_data->handle;
+
+ /*
+	 * Avoid error handling escalation when the device is disconnected
+ */
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
+ if (scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
+ scsi_done(scmd);
+ return 0;
+ }
+ }
+
if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
}
- /* host recovery or link resets sent via IOCTLs */
- if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
+ /* host recovery or link resets sent via IOCTLs */
return SCSI_MLQUEUE_HOST_BUSY;
-
- /* device has been deleted */
- else if (sas_target_priv_data->deleted) {
+ } else if (sas_target_priv_data->deleted) {
+ /* device has been deleted */
scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
+ scsi_done(scmd);
return 0;
- /* device busy with task managment */
} else if (sas_target_priv_data->tm_busy ||
- sas_device_priv_data->block)
+ sas_device_priv_data->block) {
+ /* device busy with task management */
return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
+ /*
+	 * Bug workaround for firmware SATL handling. The loop
+	 * is based on atomic operations and ensures consistency
+	 * since we're lockless at this point.
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ } while (_scsih_set_satl_pending(scmd, true));
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
@@ -3591,29 +5205,29 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* set tags */
- if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) {
- if (scmd->device->tagged_supported) {
- if (scmd->device->ordered_tags)
- mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
- else
- mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
- } else
- mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
- } else
- mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
- if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
- scmd->cmd_len != 32)
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+	/* NCQ Prio supported, make sure control indicates high priority */
+ if (sas_device_priv_data->ncq_prio_enable) {
+ class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (class == IOPRIO_CLASS_RT)
+ mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+ }
+	/* Make sure the device is not a raid volume.
+	 * We do not expose raid functionality to the upper layer for warpdrive.
+ */
+ if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
+ && !scsih_is_nvme(&scmd->device->sdev_gendev))
+ && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
- memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ memset(mpi_request, 0, ioc->request_sz);
_scsih_setup_eedp(ioc, scmd, mpi_request);
if (scmd->cmd_len == 32)
@@ -3632,42 +5246,46 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
mpi_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
- mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
mpi_request->LUN);
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
if (mpi_request->DataLength) {
- if (ioc->build_sg_scmd(ioc, scmd, smid)) {
+ pcie_device = sas_target_priv_data->pcie_dev;
+ if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
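+	/*
+	 * WARPDRIVE: if direct I/O is enabled on the volume, set up the
+	 * request for direct drive access.
+	 */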
+ raid_device = sas_target_priv_data->raid_device;
+ if (raid_device && raid_device->direct_io_enabled)
+ mpt3sas_setup_direct_io(ioc, scmd,
+ raid_device, mpi_request);
+
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ ioc->put_smid_fast_path(ioc, smid, handle);
} else
- mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
+ ioc->put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->DevHandle));
} else
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
return 0;
out:
return SCSI_MLQUEUE_HOST_BUSY;
}
-static DEF_SCSI_QCMD(_scsih_qcmd)
-
/**
* _scsih_normalize_sense - normalize descriptor and fixed format sense data
* @sense_buffer: sense data returned by target
* @data: normalized skey/asc/ascq
- *
- * Return nothing.
*/
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
@@ -3685,18 +5303,16 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
}
}
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
- * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
+ * @smid: system request message index
*
* scsi_status - SCSI Status code returned from target device
* scsi_state - state info associated with SCSI_IO determined by ioc
* ioc_status - ioc supplied status info
- *
- * Return nothing.
*/
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
@@ -3713,14 +5329,17 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
char *desc_scsi_state = ioc->tmp_string;
u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
struct _sas_device *sas_device = NULL;
- unsigned long flags;
+ struct _pcie_device *pcie_device = NULL;
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *priv_target = starget->hostdata;
char *device_str = NULL;
if (!priv_target)
return;
- device_str = "volume";
+ if (ioc->hide_ir_msg)
+ device_str = "WarpDrive";
+ else
+ device_str = "volume";
if (log_info == 0x31170000)
return;
@@ -3777,6 +5396,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
desc_ioc_state = "eedp app tag error";
break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
+ desc_ioc_state = "insufficient power";
+ break;
default:
desc_ioc_state = "unknown";
break;
@@ -3838,72 +5460,77 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
scsi_print_command(scmd);
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
- pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
- device_str, (unsigned long long)priv_target->sas_address);
+ ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
+ device_str, (u64)priv_target->sas_address);
+ } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
+ if (pcie_device) {
+ ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
+ (u64)pcie_device->wwid, pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0])
+ ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ pcie_device_put(pcie_device);
+ }
} else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- priv_target->sas_address);
+ sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
- pr_warn(MPT3SAS_FMT
- "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address, sas_device->phy);
+
+ _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL);
+
+ sas_device_put(sas_device);
}
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
- pr_warn(MPT3SAS_FMT
- "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle),
- desc_ioc_state, ioc_status, smid);
- pr_warn(MPT3SAS_FMT
- "\trequest_len(%d), underflow(%d), resid(%d)\n",
- ioc->name, scsi_bufflen(scmd), scmd->underflow,
- scsi_get_resid(scmd));
- pr_warn(MPT3SAS_FMT
- "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->TaskTag),
- le32_to_cpu(mpi_reply->TransferCount), scmd->result);
- pr_warn(MPT3SAS_FMT
- "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
- ioc->name, desc_scsi_status,
- scsi_status, desc_scsi_state, scsi_state);
+ ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
+ ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
- pr_warn(MPT3SAS_FMT
- "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name, data.skey,
- data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ data.skey, data.asc, data.ascq,
+ le32_to_cpu(mpi_reply->SenseCount));
}
-
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
response_info = le32_to_cpu(mpi_reply->ResponseInfo);
response_bytes = (u8 *)&response_info;
_scsih_response_code(ioc, response_bytes[0]);
}
}
-#endif
/**
- * _scsih_turn_on_fault_led - illuminate Fault LED
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
* @ioc: per adapter object
* @handle: device handle
* Context: process
- *
- * Return nothing.
*/
static void
-_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
Mpi2SepReply_t mpi_reply;
Mpi2SepRequest_t mpi_request;
+ struct _sas_device *sas_device;
+
+ sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+ if (!sas_device)
+ return;
memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -3914,40 +5541,79 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ sas_device->pfa_led_on = 1;
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ goto out;
+ }
+out:
+ sas_device_put(sas_device);
+}
+
+/**
+ * _scsih_turn_off_pfa_led - turn off the PFA LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to be turned off
+ * Context: process
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
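+	/*
+	 * Address the enclosure processor by enclosure handle and slot
+	 * rather than by device handle.
+	 */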
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus = 0;
+ mpi_request.Slot = cpu_to_le16(sas_device->slot);
+ mpi_request.DevHandle = 0;
+ mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
return;
}
}
/**
- * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
-_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct fw_event_work *fw_event;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
- fw_event->event = MPT3SAS_TURN_ON_FAULT_LED;
+ fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
fw_event->device_handle = handle;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
}
/**
@@ -3955,8 +5621,6 @@ _scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @handle: device handle
* Context: interrupt.
- *
- * Return nothing.
*/
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3971,33 +5635,32 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* only handle non-raid devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- if (!sas_device) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+ if (!sas_device)
+ goto out_unlock;
+
starget = sas_device->starget;
sas_target_priv_data = starget->hostdata;
if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
- ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
- starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
+ goto out_unlock;
+
+ _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
- _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+ _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
}
event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
@@ -4014,6 +5677,14 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
mpt3sas_ctl_add_to_event_log(ioc, event_reply);
kfree(event_reply);
+out:
+ if (sas_device)
+ sas_device_put(sas_device);
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ goto out;
}
/**
@@ -4025,15 +5696,16 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
*
* Callback handler when using _scsih_qcmd.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
@@ -4043,10 +5715,13 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 response_code = 0;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
+ _scsih_set_satl_pending(scmd, false);
+
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
if (mpi_reply == NULL) {
@@ -4062,6 +5737,23 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ /*
+ * WARPDRIVE: If direct_io is set then this is a direct I/O;
+ * a failed direct I/O should be redirected to the volume.
+ */
+ st = scsi_cmd_priv(scmd);
+ if (st->direct_io &&
+ ((ioc_status & MPI2_IOCSTATUS_MASK)
+ != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
+ st->direct_io = 0;
+ st->scmd = scmd;
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+ mpi_request->DevHandle =
+ cpu_to_le16(sas_device_priv_data->sas_target->handle);
+ ioc->put_smid_scsi_io(ioc, smid,
+ sas_device_priv_data->sas_target->handle);
+ return 0;
+ }
/* turning off TLR */
scsi_state = mpi_reply->SCSIState;
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
@@ -4069,10 +5761,14 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
if (!sas_device_priv_data->tlr_snoop_check) {
sas_device_priv_data->tlr_snoop_check++;
- if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
- response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
- sas_device_priv_data->flags &=
- ~MPT_DEVICE_TLR_ON;
+ if ((!ioc->is_warpdrive &&
+ !scsih_is_raid(&scmd->device->sdev_gendev) &&
+ !scsih_is_nvme(&scmd->device->sdev_gendev))
+ && sas_is_tlr_enabled(scmd->device) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
+ sas_disable_tlr(scmd->device);
+ sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
+ }
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
@@ -4104,8 +5800,13 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
_scsih_smart_predicted_fault(ioc,
le16_to_cpu(mpi_reply->DevHandle));
mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
- }
+ if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
+ ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
+ (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
+ (scmd->sense_buffer[2] == HARDWARE_ERROR)))
+ _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
+ }
switch (ioc_status) {
case MPI2_IOCSTATUS_BUSY:
case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
@@ -4121,6 +5822,17 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_TRANSPORT_DISRUPTED << 16;
goto out;
}
+ if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
+ /*
+ * This is an ATA NCQ command aborted due to another NCQ
+ * command failure. We must retry this command
+ * immediately but without incrementing its retry
+ * counter.
+ */
+ WARN_ON_ONCE(xfer_cnt != 0);
+ scmd->result = DID_IMM_RETRY << 16;
+ break;
+ }
if (log_info == 0x31110630) {
if (scmd->retries > 2) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4131,6 +5843,14 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->device->expecting_cc_ua = 1;
}
break;
+ } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
+ scmd->result = DID_RESET << 16;
+ break;
+ } else if ((scmd->device->channel == RAID_CHANNEL) &&
+ (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
+ scmd->result = DID_RESET << 16;
+ break;
}
scmd->result = DID_SOFT_ERROR << 16;
break;
@@ -4165,17 +5885,14 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->sense_buffer[0] = 0x70;
- scmd->sense_buffer[2] = ILLEGAL_REQUEST;
- scmd->sense_buffer[12] = 0x20;
- scmd->sense_buffer[13] = 0;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
+ 0x20, 0);
}
break;
case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
scsi_set_resid(scmd, 0);
+ fallthrough;
case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI2_IOCSTATUS_SUCCESS:
scmd->result = (DID_OK << 16) | scsi_status;
@@ -4202,23 +5919,645 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_INVALID_STATE:
case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
default:
scmd->result = DID_SOFT_ERROR << 16;
break;
}
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
-#endif
out:
scsi_dma_unmap(scmd);
+ mpt3sas_base_free_smid(ioc, smid);
+ scsi_done(scmd);
+ return 0;
+}
- scmd->scsi_done(scmd);
- return 1;
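
The completion path above folds the IOC status, log info and SCSI state into a single midlayer result, with two special cases: an NCQ command aborted because another NCQ command failed (IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) is retried immediately without consuming a retry, and a failed virtual I/O (VIRTUAL_IO_FAILED_RETRY) is retried via a reset result. A standalone sketch of that decision shape follows; the enum names and the first two log-info constants are illustrative stand-ins, not the driver's definitions (only 0x31110630 is taken from the code above):

#include <stdint.h>

/* simplified stand-ins for the midlayer host-byte results */
enum io_result { R_RETRY_NOW, R_RESET_RETRY, R_NO_CONNECT, R_SOFT_ERROR };

/* illustrative log-info values; only 0x31110630 comes from the driver code */
#define LOGINFO_NCQ_FAIL_AFTER_ERR	0x00000001u	/* assumption */
#define LOGINFO_VIRTUAL_IO_FAILED	0x00000002u	/* assumption */
#define LOGINFO_PERSISTENT_PATH_FAIL	0x31110630u

static enum io_result classify_terminated_io(uint32_t log_info, int retries)
{
	if (log_info == LOGINFO_NCQ_FAIL_AFTER_ERR)
		return R_RETRY_NOW;	/* retry without charging a retry count */
	if (log_info == LOGINFO_VIRTUAL_IO_FAILED)
		return R_RESET_RETRY;	/* midlayer retries after a reset result */
	if (log_info == LOGINFO_PERSISTENT_PATH_FAIL && retries > 2)
		return R_NO_CONNECT;	/* give up on a persistently failing path */
	return R_SOFT_ERROR;		/* generic retryable soft error */
}
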
+/**
+ * _scsih_update_vphys_after_reset - update the Port's
+ * vphys_list after reset
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz, ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u64 attached_sas_addr;
+ u8 found = 0, port_id;
+ Mpi2SasPhyPage0_t phy_pg0;
+ struct hba_port *port, *port_next, *mport;
+ struct virtual_phy *vphy, *vphy_next;
+ struct _sas_device *sas_device;
+
+ /*
+ * Mark all the vphys objects as dirty.
+ */
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
+ }
+ }
+
+ /*
+ * Read SASIOUnitPage0 to get each HBA Phy's data.
+ */
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ /*
+ * Loop over each HBA Phy.
+ */
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ /*
+ * Skip the Phy unless its Negotiated Link Rate is at least 1.5G.
+ */
+ if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
+ MPI2_SAS_NEG_LINK_RATE_1_5)
+ continue;
+ /*
+ * Check whether the Phy is connected to a SEP device. If it
+ * is, read the Phy's SASPHYPage0 data to determine whether
+ * the Phy is a virtual Phy; if so, the attached remote device
+ * is confirmed to be the HBA's vSES device.
+ */
+ if (!(le32_to_cpu(
+ sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP))
+ continue;
+
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ if (!(le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY))
+ continue;
+ /*
+ * Get the vSES device's SAS Address.
+ */
+ attached_handle = le16_to_cpu(
+ sas_iounit_pg0->PhyData[i].AttachedDevHandle);
+ if (_scsih_get_sas_address(ioc, attached_handle,
+ &attached_sas_addr) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ found = 0;
+ port = port_next = NULL;
+ /*
+ * Loop over each virtual_phy object from
+ * each port's vphys_list.
+ */
+ list_for_each_entry_safe(port,
+ port_next, &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ /*
+ * Continue with next virtual_phy object
+ * if the object is not marked as dirty.
+ */
+ if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
+ continue;
+
+ /*
+ * Continue with the next virtual_phy object
+ * if the object's SAS Address does not equal
+ * the current Phy's vSES device SAS Address.
+ */
+ if (vphy->sas_address != attached_sas_addr)
+ continue;
+ /*
+ * Enable current Phy number bit in object's
+ * phy_mask field.
+ */
+ if (!(vphy->phy_mask & (1 << i)))
+ vphy->phy_mask = (1 << i);
+ /*
+ * Get the hba_port object from the hba_port table
+ * corresponding to the current Phy's Port ID.
+ * If there is no hba_port object corresponding
+ * to the Phy's Port ID, then create a new hba_port
+ * object and add it to the hba_port table.
+ */
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
+ if (!mport) {
+ mport = kzalloc(
+ sizeof(struct hba_port), GFP_KERNEL);
+ if (!mport)
+ break;
+ mport->port_id = port_id;
+ ioc_info(ioc,
+ "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
+ __func__, mport, mport->port_id);
+ list_add_tail(&mport->list,
+ &ioc->port_table_list);
+ }
+ /*
+ * If the mport & port pointers do not point to the
+ * same hba_port object, the vSES device's Port ID
+ * changed after the reset; move the current
+ * virtual_phy object from port's vphys_list to
+ * mport's vphys_list.
+ */
+ if (port != mport) {
+ if (!mport->vphys_mask)
+ INIT_LIST_HEAD(
+ &mport->vphys_list);
+ mport->vphys_mask |= (1 << i);
+ port->vphys_mask &= ~(1 << i);
+ list_move(&vphy->list,
+ &mport->vphys_list);
+ sas_device = mpt3sas_get_sdev_by_addr(
+ ioc, attached_sas_addr, port);
+ if (sas_device)
+ sas_device->port = mport;
+ }
+ /*
+ * Earlier, while updating the hba_port table,
+ * it was determined that no other directly
+ * attached device has mport's Port ID; hence
+ * mport was marked as dirty. Only the vSES
+ * device has this Port ID, so unmark mport
+ * as dirty.
+ */
+ if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
+ mport->sas_address = 0;
+ mport->phy_mask = 0;
+ mport->flags &=
+ ~HBA_PORT_FLAG_DIRTY_PORT;
+ }
+ /*
+ * Unmark current virtual_phy object as dirty.
+ */
+ vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
+ found = 1;
+ break;
+ }
+ if (found)
+ break;
+ }
+ }
+out:
+ kfree(sas_iounit_pg0);
+}
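
_scsih_update_vphys_after_reset() follows the driver's usual post-reset reconciliation pattern: mark every cached object dirty, clear the flag on objects that can still be matched against freshly read firmware pages, and treat whatever stays dirty as stale. A small standalone sketch of that pattern over a plain array (the driver works on linked lists under locks, which this sketch omits):

#include <stdio.h>

#define FLAG_DIRTY 0x1

struct tracked_obj {
	unsigned int flags;
	unsigned long long key;	/* e.g. a SAS address */
	int valid;		/* still present in the table */
};

/*
 * Mark everything dirty, clear the flag for objects rediscovered from
 * fresh firmware pages, then sweep whatever stayed dirty.
 */
static void reconcile(struct tracked_obj *objs, int n,
		      const unsigned long long *fresh_keys, int n_fresh)
{
	int i, j;

	for (i = 0; i < n; i++)
		if (objs[i].valid)
			objs[i].flags |= FLAG_DIRTY;

	for (j = 0; j < n_fresh; j++)
		for (i = 0; i < n; i++)
			if (objs[i].valid && objs[i].key == fresh_keys[j])
				objs[i].flags &= ~FLAG_DIRTY;

	for (i = 0; i < n; i++)
		if (objs[i].valid && (objs[i].flags & FLAG_DIRTY)) {
			printf("removing stale entry 0x%llx\n", objs[i].key);
			objs[i].valid = 0;
		}
}
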
+
+/**
+ * _scsih_get_port_table_after_reset - Construct temporary port table
+ * @ioc: per adapter object
+ * @port_table: address where port table needs to be constructed
+ *
+ * Return: number of HBA port entries available after reset.
+ */
+static int
+_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_table)
+{
+ u16 sz, ioc_status;
+ int i, j;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u64 attached_sas_addr;
+ u8 found = 0, port_count = 0, port_id;
+
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return port_count;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ found = 0;
+ if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
+ MPI2_SAS_NEG_LINK_RATE_1_5)
+ continue;
+ attached_handle =
+ le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
+ if (_scsih_get_sas_address(
+ ioc, attached_handle, &attached_sas_addr) != 0) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+
+ for (j = 0; j < port_count; j++) {
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (port_table[j].port_id == port_id &&
+ port_table[j].sas_address == attached_sas_addr) {
+ port_table[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ port_table[port_count].port_id = port_id;
+ port_table[port_count].phy_mask = (1 << i);
+ port_table[port_count].sas_address = attached_sas_addr;
+ port_count++;
+ }
+out:
+ kfree(sas_iounit_pg0);
+ return port_count;
+}
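
Both page readers above size the SAS IOUnit Page 0 buffer with struct_size(), the helper from <linux/overflow.h> that computes the size of a structure ending in a flexible array member plus `count` trailing elements, with overflow checking. A userspace sketch of the equivalent arithmetic, using a hypothetical page layout rather than the MPI structure:

#include <stddef.h>
#include <stdlib.h>

/* hypothetical config page with a flexible array member */
struct phy_data {
	unsigned short attached_handle;
	unsigned char port;
};

struct iounit_page {
	unsigned short length;
	unsigned char num_phys;
	struct phy_data phy[];		/* flexible array member */
};

static struct iounit_page *alloc_page_buffer(unsigned int num_phys)
{
	/*
	 * Roughly what struct_size(pg, phy, num_phys) computes: the header
	 * plus num_phys trailing elements. The kernel macro additionally
	 * checks the multiplication and addition for overflow, which this
	 * sketch omits.
	 */
	size_t sz = offsetof(struct iounit_page, phy) +
		    num_phys * sizeof(struct phy_data);

	return calloc(1, sz);
}
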
+
+enum hba_port_matched_codes {
+ NOT_MATCHED = 0,
+ MATCHED_WITH_ADDR_AND_PHYMASK,
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
+ MATCHED_WITH_ADDR_AND_SUBPHYMASK,
+ MATCHED_WITH_ADDR,
+};
+
+/**
+ * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
+ * from HBA port table
+ * @ioc: per adapter object
+ * @port_entry: hba port entry from temporary port table which needs to be
+ * searched for matched entry in the HBA port table
+ * @matched_port_entry: save matched hba port entry here
+ * @count: count of matched entries
+ *
+ * Return: type of matched entry found.
+ */
+static enum hba_port_matched_codes
+_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_entry,
+ struct hba_port **matched_port_entry, int *count)
+{
+ struct hba_port *port_table_entry, *matched_port = NULL;
+ enum hba_port_matched_codes matched_code = NOT_MATCHED;
+ int lcount = 0;
+ *matched_port_entry = NULL;
+
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
+ continue;
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask == port_entry->phy_mask)) {
+ matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
+ matched_port = port_table_entry;
+ break;
+ }
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask & port_entry->phy_mask)
+ && (port_table_entry->port_id == port_entry->port_id)) {
+ matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
+ matched_port = port_table_entry;
+ continue;
+ }
+
+ if ((port_table_entry->sas_address == port_entry->sas_address)
+ && (port_table_entry->phy_mask & port_entry->phy_mask)) {
+ if (matched_code ==
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
+ continue;
+ matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
+ matched_port = port_table_entry;
+ continue;
+ }
+
+ if (port_table_entry->sas_address == port_entry->sas_address) {
+ if (matched_code ==
+ MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
+ continue;
+ if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
+ continue;
+ matched_code = MATCHED_WITH_ADDR;
+ matched_port = port_table_entry;
+ lcount++;
+ }
+ }
+
+ *matched_port_entry = matched_port;
+ if (matched_code == MATCHED_WITH_ADDR)
+ *count = lcount;
+ return matched_code;
+}
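
_scsih_look_and_get_matched_port_entry() ranks candidate matches: an identical SAS address and phy mask wins outright, then address plus a subset of the mask with the same port id, then address plus a subset of the mask, and finally address alone. A standalone sketch of the same best-match selection; the illustrative enum is ordered so that a plain numeric comparison expresses the precedence, whereas the driver's hba_port_matched_codes encodes it in explicit checks instead:

/* match quality, higher is better (ordering chosen for this sketch) */
enum match_quality {
	M_NONE = 0,
	M_ADDR,			/* SAS address only */
	M_ADDR_SUBMASK,		/* address + subset of the phy mask */
	M_ADDR_SUBMASK_PORT,	/* address + subset of the mask + same port id */
	M_ADDR_FULLMASK,	/* address + identical phy mask */
};

struct port_desc {
	unsigned long long sas_address;
	unsigned int phy_mask;
	int port_id;
};

static enum match_quality grade(const struct port_desc *old,
				const struct port_desc *cur)
{
	if (old->sas_address != cur->sas_address)
		return M_NONE;
	if (old->phy_mask == cur->phy_mask)
		return M_ADDR_FULLMASK;
	if (old->phy_mask & cur->phy_mask)
		return (old->port_id == cur->port_id) ?
			M_ADDR_SUBMASK_PORT : M_ADDR_SUBMASK;
	return M_ADDR;
}

/* pick the best-graded old entry for a freshly discovered one */
static const struct port_desc *best_match(const struct port_desc *olds,
					  int n, const struct port_desc *cur)
{
	const struct port_desc *best = NULL;
	enum match_quality best_q = M_NONE, q;
	int i;

	for (i = 0; i < n; i++) {
		q = grade(&olds[i], cur);
		if (q > best_q) {
			best_q = q;
			best = &olds[i];
		}
	}
	return best;
}
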
+
+/**
+ * _scsih_del_phy_part_of_anther_port - remove phy if it
+ * is a part of another port
+ * @ioc: per adapter object
+ * @port_table: port table after reset
+ * @index: hba port entry index
+ * @port_count: number of ports available after host reset
+ * @offset: HBA phy bit offset
+ *
+ */
+static void
+_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *port_table,
+ int index, u8 port_count, int offset)
+{
+ struct _sas_node *sas_node = &ioc->sas_hba;
+ u32 i, found = 0;
+
+ for (i = 0; i < port_count; i++) {
+ if (i == index)
+ continue;
+
+ if (port_table[i].phy_mask & (1 << offset)) {
+ mpt3sas_transport_del_phy_from_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset]);
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ port_table[index].phy_mask |= (1 << offset);
+}
+
+/**
+ * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
+ * right port
+ * @ioc: per adapter object
+ * @hba_port_entry: hba port table entry
+ * @port_table: temporary port table
+ * @index: hba port entry index
+ * @port_count: number of ports available after host reset
+ *
+ */
+static void
+_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct hba_port *hba_port_entry, struct hba_port *port_table,
+ int index, int port_count)
+{
+ u32 phy_mask, offset = 0;
+ struct _sas_node *sas_node = &ioc->sas_hba;
+
+ phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
+
+ for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
+ if (phy_mask & (1 << offset)) {
+ if (!(port_table[index].phy_mask & (1 << offset))) {
+ _scsih_del_phy_part_of_anther_port(
+ ioc, port_table, index, port_count,
+ offset);
+ continue;
+ }
+ if (sas_node->phy[offset].phy_belongs_to_port)
+ mpt3sas_transport_del_phy_from_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset]);
+ mpt3sas_transport_add_phy_to_an_existing_port(
+ ioc, sas_node, &sas_node->phy[offset],
+ hba_port_entry->sas_address,
+ hba_port_entry);
+ }
+ }
+}
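
The helper above XORs the cached phy_mask against the freshly read one to find the phys whose port membership changed, then adds or removes each changed phy. A standalone sketch of that bitmask diffing, assuming at most 32 phys as the u32 masks above do:

#include <stdio.h>

/*
 * XOR the stored mask against the freshly read one: set bits are phys
 * whose membership changed. A bit present only in the new mask is an
 * added phy; a bit present only in the old mask is a removed phy.
 */
static void diff_phy_masks(unsigned int old_mask, unsigned int new_mask,
			   unsigned int num_phys)
{
	unsigned int changed = old_mask ^ new_mask;
	unsigned int i;

	for (i = 0; i < num_phys; i++) {
		if (!(changed & (1u << i)))
			continue;
		if (new_mask & (1u << i))
			printf("phy %u: add to port\n", i);
		else
			printf("phy %u: remove from port\n", i);
	}
}
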
+
+/**
+ * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct hba_port *port, *port_next;
+ struct virtual_phy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!port->vphys_mask)
+ continue;
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
+ drsprintk(ioc, ioc_info(ioc,
+ "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
+ vphy, port->port_id,
+ vphy->phy_mask));
+ port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ }
+ if (!port->vphys_mask && !port->sas_address)
+ port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
+ }
+}
+
+/**
+ * _scsih_del_dirty_port_entries - delete dirty port entries from port list
+ * after host reset
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
+ port->flags & HBA_PORT_FLAG_NEW_PORT)
+ continue;
+
+ drsprintk(ioc, ioc_info(ioc,
+ "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
+ port, port->port_id, port->phy_mask));
+ list_del(&port->list);
+ kfree(port);
+ }
+}
+
+/**
+ * _scsih_sas_port_refresh - Update HBA port table after host reset
+ * @ioc: per adapter object
+ */
+static void
+_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 port_count = 0;
+ struct hba_port *port_table;
+ struct hba_port *port_table_entry;
+ struct hba_port *port_entry = NULL;
+ int i, j, count = 0, lcount = 0;
+ int ret;
+ u64 sas_addr;
+ u8 num_phys;
+
+ drsprintk(ioc, ioc_info(ioc,
+ "updating ports for sas_host(0x%016llx)\n",
+ (unsigned long long)ioc->sas_hba.sas_address));
+
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (num_phys > ioc->sas_hba.nr_phys_allocated) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc->sas_hba.num_phys = num_phys;
+
+ port_table = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct hba_port), GFP_KERNEL);
+ if (!port_table)
+ return;
+
+ port_count = _scsih_get_port_table_after_reset(ioc, port_table);
+ if (!port_count)
+ return;
+
+ drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
+ for (j = 0; j < port_count; j++)
+ drsprintk(ioc, ioc_info(ioc,
+ "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ port_table[j].port_id,
+ port_table[j].phy_mask, port_table[j].sas_address));
+
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
+ port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
+
+ drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
+ port_table_entry = NULL;
+ list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
+ drsprintk(ioc, ioc_info(ioc,
+ "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
+ port_table_entry->port_id,
+ port_table_entry->phy_mask,
+ port_table_entry->sas_address));
+ }
+
+ for (j = 0; j < port_count; j++) {
+ ret = _scsih_look_and_get_matched_port_entry(ioc,
+ &port_table[j], &port_entry, &count);
+ if (!port_entry) {
+ drsprintk(ioc, ioc_info(ioc,
+ "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
+ port_table[j].sas_address,
+ port_table[j].port_id));
+ continue;
+ }
+
+ switch (ret) {
+ case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
+ case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
+ _scsih_add_or_del_phys_from_existing_port(ioc,
+ port_entry, port_table, j, port_count);
+ break;
+ case MATCHED_WITH_ADDR:
+ sas_addr = port_table[j].sas_address;
+ for (i = 0; i < port_count; i++) {
+ if (port_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+
+ if (count > 1 || lcount > 1)
+ port_entry = NULL;
+ else
+ _scsih_add_or_del_phys_from_existing_port(ioc,
+ port_entry, port_table, j, port_count);
+ }
+
+ if (!port_entry)
+ continue;
+
+ if (port_entry->port_id != port_table[j].port_id)
+ port_entry->port_id = port_table[j].port_id;
+ port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
+ port_entry->phy_mask = port_table[j].phy_mask;
+ }
+
+ port_table_entry = NULL;
+}
+
+/**
+ * _scsih_alloc_vphy - allocate virtual_phy object
+ * @ioc: per adapter object
+ * @port_id: Port ID number
+ * @phy_num: HBA Phy number
+ *
+ * Returns allocated virtual_phy object.
+ */
+static struct virtual_phy *
+_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
+{
+ struct virtual_phy *vphy;
+ struct hba_port *port;
+
+ port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!port)
+ return NULL;
+
+ vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
+ if (!vphy) {
+ vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+
+ /*
+ * Enable bit corresponding to HBA phy number on its
+ * parent hba_port object's vphys_mask field.
+ */
+ port->vphys_mask |= (1 << phy_num);
+ vphy->phy_mask |= (1 << phy_num);
+
+ list_add_tail(&vphy->list, &port->vphys_list);
+
+ ioc_info(ioc,
+ "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
+ vphy, port->port_id, phy_num);
+ }
+ return vphy;
}
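
_scsih_alloc_vphy() is a find-or-create helper: it returns the existing virtual_phy for the phy if one is already tracked, otherwise it allocates one and records the phy in the port's vphys_mask. A standalone sketch of that lazy-allocation shape, using an array indexed by phy number in place of the driver's vphys_list:

#include <stdlib.h>

#define MAX_PHYS 32

struct vphy_entry {
	unsigned int phy_mask;
};

struct port_entry {
	unsigned int vphys_mask;		/* which phys have a vphy */
	struct vphy_entry *vphy[MAX_PHYS];	/* stand-in for the list */
};

/* return the existing vphy for this phy, or allocate and register one */
static struct vphy_entry *get_or_alloc_vphy(struct port_entry *port,
					    unsigned int phy_num)
{
	struct vphy_entry *v;

	if (port->vphys_mask & (1u << phy_num))
		return port->vphy[phy_num];	/* already tracked */

	v = calloc(1, sizeof(*v));
	if (!v)
		return NULL;

	v->phy_mask |= (1u << phy_num);
	port->vphys_mask |= (1u << phy_num);
	port->vphy[phy_num] = v;
	return v;
}
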
/**
@@ -4229,8 +6568,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may change from the previous setting, so this
 * code keeps the handles updated if they change.
- *
- * Return nothing.
*/
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
@@ -4241,18 +6578,19 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
u16 attached_handle;
- u8 link_rate;
+ u8 link_rate, port_id;
+ struct hba_port *port;
+ Mpi2SasPhyPage0_t phy_pg0;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "updating handles for sas_host(0x%016llx)\n",
- ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+ dtmprintk(ioc,
+ ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+ (u64)ioc->sas_hba.sas_address));
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
- * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -4265,15 +6603,94 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
if (i == 0)
- ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
- PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.handle = le16_to_cpu(
+ sas_iounit_pg0->PhyData[0].ControllerDevHandle);
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ if (ioc->shost_recovery)
+ port->flags = HBA_PORT_FLAG_NEW_PORT;
+ list_add_tail(&port->list, &ioc->port_table_list);
+ }
+ /*
+ * Check whether the current Phy belongs to the HBA's vSES device.
+ */
+ if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP &&
+ (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+ &phy_pg0, i))) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (!(le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY))
+ continue;
+ /*
+ * Allocate a virtual_phy object for vSES device, if
+ * this vSES device is hot added.
+ */
+ if (!_scsih_alloc_vphy(ioc, port_id, i))
+ goto out;
+ ioc->sas_hba.phy[i].hba_vphy = 1;
+ }
+
+ /*
+ * Add new HBA phys to STL if these new phys got added as part
+ * of HBA Firmware upgrade/downgrade operation.
+ */
+ if (!ioc->sas_hba.phy[i].phy) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
+ &phy_pg0, i))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc,
+ &ioc->sas_hba.phy[i], phy_pg0,
+ ioc->sas_hba.parent_dev);
+ continue;
+ }
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle);
if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ ioc->sas_hba.phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
- attached_handle, i, link_rate);
+ attached_handle, i, link_rate,
+ ioc->sas_hba.phy[i].port);
+ }
+ /*
+ * Clear the phy details if this phy got disabled as part of
+ * HBA Firmware upgrade/downgrade operation.
+ */
+ for (i = ioc->sas_hba.num_phys;
+ i < ioc->sas_hba.nr_phys_allocated; i++) {
+ if (ioc->sas_hba.phy[i].phy &&
+ ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
+ SAS_LINK_RATE_1_5_GBPS)
+ mpt3sas_transport_update_links(ioc,
+ ioc->sas_hba.sas_address, 0, i,
+ MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
}
out:
kfree(sas_iounit_pg0);
@@ -4284,8 +6701,6 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
*
* Creating host side data object, stored in ioc->sas_hba
- *
- * Return nothing.
*/
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
@@ -4300,57 +6715,68 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u16 sz;
u8 device_missing_delay;
+ u8 num_phys, port_id;
+ struct hba_port *port;
- mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
- if (!ioc->sas_hba.num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
+ ioc->sas_hba.nr_phys_allocated = max_t(u8,
+ MPT_MAX_HBA_NUM_PHYS, num_phys);
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.num_phys = num_phys;
+
/* sas_iounit page 0 */
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
/* sas_iounit page 1 */
- sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit1PhyData_t));
+ sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -4366,50 +6792,74 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
- ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
- sizeof(struct _sas_phy), GFP_KERNEL);
- if (!ioc->sas_hba.phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
i))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if (i == 0)
ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
PhyData[0].ControllerDevHandle);
+
+ port_id = sas_iounit_pg0->PhyData[i].Port;
+ if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
+ port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+ if (!port)
+ goto out;
+
+ port->port_id = port_id;
+ ioc_info(ioc,
+ "hba_port entry: %p, port: %d is added to hba_port list\n",
+ port, port->port_id);
+ list_add_tail(&port->list,
+ &ioc->port_table_list);
+ }
+
+ /*
+ * Check whether the current Phy belongs to the HBA's vSES device.
+ */
+ if ((le32_to_cpu(phy_pg0.PhyInfo) &
+ MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
+ (phy_pg0.NegotiatedLinkRate >> 4) >=
+ MPI2_SAS_NEG_LINK_RATE_1_5) {
+ /*
+ * Allocate a virtual_phy object for vSES device.
+ */
+ if (!_scsih_alloc_vphy(ioc, port_id, i))
+ goto out;
+ ioc->sas_hba.phy[i].hba_vphy = 1;
+ }
+
ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
ioc->sas_hba.phy[i].phy_id = i;
+ ioc->sas_hba.phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
phy_pg0, ioc->sas_hba.parent_dev);
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- pr_info(MPT3SAS_FMT
- "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
- ioc->name, ioc->sas_hba.handle,
- (unsigned long long) ioc->sas_hba.sas_address,
- ioc->sas_hba.num_phys) ;
+ ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->sas_hba.handle,
+ (u64)ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys);
if (ioc->sas_hba.enclosure_handle) {
if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -4431,22 +6881,23 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
*
* Creating expander object, stored in ioc->sas_expander_list.
*
- * Return 0 for success, else error.
+ * Return: 0 for success, else error.
*/
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_node *sas_expander;
+ struct _enclosure_node *enclosure_dev;
Mpi2ConfigReply_t mpi_reply;
Mpi2ExpanderPage0_t expander_pg0;
Mpi2ExpanderPage1_t expander_pg1;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
u32 ioc_status;
u16 parent_handle;
u64 sas_address, sas_address_parent = 0;
int i;
unsigned long flags;
struct _sas_port *mpt3sas_port = NULL;
+ u8 port_id;
int rc = 0;
@@ -4458,16 +6909,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -4475,14 +6926,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
!= 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
+
+ port_id = expander_pg0.PhysicalPort;
if (sas_address_parent != ioc->sas_hba.sas_address) {
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address_parent);
+ sas_address_parent,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_expander) {
rc = _scsih_expander_add(ioc, parent_handle);
@@ -4494,7 +6948,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_address = le64_to_cpu(expander_pg0.SASAddress);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
+ sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
@@ -4503,8 +6957,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander = kzalloc(sizeof(struct _sas_node),
GFP_KERNEL);
if (!sas_expander) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -4512,71 +6966,83 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander->num_phys = expander_pg0.NumPhys;
sas_expander->sas_address_parent = sas_address_parent;
sas_expander->sas_address = sas_address;
+ sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!sas_expander->port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
- pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
- " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
- handle, parent_handle, (unsigned long long)
- sas_expander->sas_address, sas_expander->num_phys);
+ ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle,
+ (u64)sas_expander->sas_address, sas_expander->num_phys);
- if (!sas_expander->num_phys)
+ if (!sas_expander->num_phys) {
+ rc = -1;
goto out_fail;
+ }
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
INIT_LIST_HEAD(&sas_expander->sas_port_list);
mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
- sas_address_parent);
+ sas_address_parent, sas_expander->port);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+ sas_expander->rphy = mpt3sas_port->rphy;
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
sas_expander->phy[i].handle = handle;
sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].port =
+ mpt3sas_get_port_by_id(ioc, port_id, 0);
if ((mpt3sas_transport_add_expander_phy(ioc,
&sas_expander->phy[i], expander_pg1,
sas_expander->parent_dev))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
}
if (sas_expander->enclosure_handle) {
- if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_expander->enclosure_handle)))
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
sas_expander->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
}
_scsih_expander_node_add(ioc, sas_expander);
- return 0;
+ return 0;
out_fail:
if (mpt3sas_port)
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
- sas_address_parent);
+ sas_address_parent, sas_expander->port);
kfree(sas_expander);
return rc;
}
@@ -4585,11 +7051,11 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* mpt3sas_expander_remove - removing expander object
* @ioc: per adapter object
* @sas_address: expander sas_address
- *
- * Return nothing.
+ * @port: hba port entry
*/
void
-mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ struct hba_port *port)
{
struct _sas_node *sas_expander;
unsigned long flags;
@@ -4597,11 +7063,12 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
if (ioc->shost_recovery)
return;
+ if (!port)
+ return;
+
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
- sas_address);
- if (sas_expander)
- list_del(&sas_expander->list);
+ sas_address, port);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
_scsih_expander_node_remove(ioc, sas_expander);
@@ -4617,8 +7084,8 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
 * Callback handler when sending internally generated SCSI_IO.
* The callback index passed is `ioc->scsih_cb_idx`
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
@@ -4652,9 +7119,9 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* @ioc: per adapter object
* @sas_address: sas address
* @handle: sas device handle
- * @access_flags: errors returned during discovery of the device
+ * @access_status: errors returned during discovery of the device
*
- * Return 0 for success, else failure
+ * Return: 0 for success, else failure
*/
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
@@ -4705,9 +7172,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
if (!rc)
return 0;
- pr_err(MPT3SAS_FMT
- "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc, (unsigned long long)sas_address, handle);
+ ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)sas_address, handle);
return rc;
}
@@ -4716,10 +7182,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
* @handle: attached device handle
- * @phy_numberv: phy number
+ * @phy_number: phy number
* @link_rate: new link rate
- *
- * Returns nothing.
*/
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
@@ -4727,14 +7191,15 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
+ struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
unsigned long flags;
u64 sas_address;
struct scsi_target *starget;
struct MPT3SAS_TARGET *sas_target_priv_data;
u32 device_info;
-
+ struct hba_port *port;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
@@ -4757,13 +7222,14 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_address);
+ port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
+ if (!port)
+ goto out_unlock;
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+ sas_address, port);
- if (!sas_device) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ if (!sas_device)
+ goto out_unlock;
if (unlikely(sas_device->handle != handle)) {
starget = sas_device->starget;
@@ -4773,28 +7239,59 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->handle, handle);
sas_target_priv_data->handle = handle;
sas_device->handle = handle;
+ if (le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ sas_device_pg0.EnclosureLevel;
+ memcpy(sas_device->connector_name,
+ sas_device_pg0.ConnectorName, 4);
+ sas_device->connector_name[4] = '\0';
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
+
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+ enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ sas_device->enclosure_handle);
+ if (enclosure_dev) {
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
+ }
+ }
}
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
+ ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
+ goto out_unlock;
}
/* check if there were any issues with discovery */
if (_scsih_check_access_status(ioc, sas_address, handle,
- sas_device_pg0.AccessStatus)) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return;
- }
+ sas_device_pg0.AccessStatus))
+ goto out_unlock;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- _scsih_ublock_io_device(ioc, sas_address);
+ _scsih_ublock_io_device(ioc, sas_address, port);
+
+ if (sas_device)
+ sas_device_put(sas_device);
+ return;
+out_unlock:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ sas_device_put(sas_device);
}
/**
@@ -4806,7 +7303,7 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
*
* Creating end device object, stored in ioc->sas_device_list.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
@@ -4814,25 +7311,25 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
{
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
- Mpi2SasEnclosurePage0_t enclosure_pg0;
struct _sas_device *sas_device;
+ struct _enclosure_node *enclosure_dev = NULL;
u32 ioc_status;
u64 sas_address;
u32 device_info;
- unsigned long flags;
+ u8 port_id;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -4840,13 +7337,14 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
return -1;
+ set_bit(handle, ioc->pend_os_device_add);
sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
return -1;
}
@@ -4855,62 +7353,101 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device_pg0.AccessStatus))
return -1;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_address);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (sas_device)
+ port_id = sas_device_pg0.PhysicalPort;
+ sas_device = mpt3sas_get_sdev_by_addr(ioc,
+ sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
+ if (sas_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
+ sas_device_put(sas_device);
return -1;
+ }
+
+ if (sas_device_pg0.EnclosureHandle) {
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ le16_to_cpu(sas_device_pg0.EnclosureHandle));
+ if (enclosure_dev == NULL)
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0.EnclosureHandle);
+ }
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
if (!sas_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
+ kref_init(&sas_device->refcount);
sas_device->handle = handle;
if (_scsih_get_sas_address(ioc,
le16_to_cpu(sas_device_pg0.ParentDevHandle),
&sas_device->sas_address_parent) != 0)
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
- sas_device->slot =
- le16_to_cpu(sas_device_pg0.Slot);
+ if (sas_device->enclosure_handle != 0)
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
sas_device->device_info = device_info;
sas_device->sas_address = sas_address;
sas_device->phy = sas_device_pg0.PhyNum;
sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+ sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
+ if (!sas_device->port) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
- /* get enclosure_logical_id */
- if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
- ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_device->enclosure_handle)))
+ if (le16_to_cpu(sas_device_pg0.Flags)
+ & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ sas_device_pg0.EnclosureLevel;
+ memcpy(sas_device->connector_name,
+ sas_device_pg0.ConnectorName, 4);
+ sas_device->connector_name[4] = '\0';
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
+ /* get enclosure_logical_id & chassis_slot*/
+ sas_device->is_chassis_slot_valid = 0;
+ if (enclosure_dev) {
sas_device->enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
+ }
+ }
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+ sas_device->port_type = sas_device_pg0.MaxPortConnections;
+ ioc_info(ioc,
+ "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
+ handle, sas_device->sas_address, sas_device->port_type);
if (ioc->wait_for_discovery_to_complete)
_scsih_sas_device_init_add(ioc, sas_device);
else
_scsih_sas_device_add(ioc, sas_device);
+out:
+ sas_device_put(sas_device);
return 0;
}
/**
* _scsih_remove_device - removing sas device object
* @ioc: per adapter object
- * @sas_device_delete: the sas_device object
- *
- * Return nothing.
+ * @sas_device: the sas_device object
*/
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
@@ -4918,39 +7455,48 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
{
struct MPT3SAS_TARGET *sas_target_priv_data;
+ if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+ (sas_device->pfa_led_on)) {
+ _scsih_turn_off_pfa_led(ioc, sas_device);
+ sas_device->pfa_led_on = 0;
+ }
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
if (sas_device->starget && sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ _scsih_ublock_io_device(ioc, sas_device->sas_address,
+ sas_device->port);
sas_target_priv_data->handle =
MPT3SAS_INVALID_DEVICE_HANDLE;
}
- mpt3sas_transport_port_remove(ioc,
+
+ if (!ioc->hide_drives)
+ mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
+ sas_device->sas_address_parent,
+ sas_device->port);
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
- kfree(sas_device);
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
}
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
* _scsih_sas_topology_change_event_debug - debug for topology event
* @ioc: per adapter object
@@ -4986,8 +7532,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "sas topology change: (%s)\n", status_str);
pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
"start_phy(%02d), count(%d)\n",
le16_to_cpu(event_data->ExpanderDevHandle),
@@ -5028,7 +7573,6 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
}
}
-#endif
/**
* _scsih_sas_topology_change_event - handle topology changes
@@ -5049,12 +7593,13 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
u8 link_rate, prev_link_rate;
- Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+ struct hba_port *port;
+ Mpi2EventDataSasTopologyChangeList_t *event_data =
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_topology_change_event_debug(ioc, event_data);
-#endif
if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
return 0;
@@ -5065,12 +7610,12 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_sas_host_refresh(ioc);
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
/* handle expander add */
if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
@@ -5083,6 +7628,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_expander) {
sas_address = sas_expander->sas_address;
max_phys = sas_expander->num_phys;
+ port = sas_expander->port;
} else if (parent_handle < ioc->sas_hba.num_phys) {
sas_address = ioc->sas_hba.sas_address;
max_phys = ioc->sas_hba.num_phys;
@@ -5095,8 +7641,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -5125,7 +7671,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
mpt3sas_transport_update_links(ioc, sas_address,
- handle, phy_number, link_rate);
+ handle, phy_number, link_rate, port);
if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
break;
@@ -5133,6 +7679,10 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_check_device(ioc, sas_address, handle,
phy_number, link_rate);
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
+ fallthrough;
case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
@@ -5140,7 +7690,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
mpt3sas_transport_update_links(ioc, sas_address,
- handle, phy_number, link_rate);
+ handle, phy_number, link_rate, port);
_scsih_add_device(ioc, handle, phy_number, 0);
@@ -5155,18 +7705,16 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle expander removal */
if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
sas_expander)
- mpt3sas_expander_remove(ioc, sas_address);
+ mpt3sas_expander_remove(ioc, sas_address, port);
return 0;
}
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
* _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -5218,42 +7766,30 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
reason_str = "unknown reason";
break;
}
- pr_info(MPT3SAS_FMT "device status change: (%s)\n"
- "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
- event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+ event_data->ASC, event_data->ASCQ);
+ pr_cont("\n");
}
-#endif
/**
* _scsih_sas_device_status_change_event - handle device status change
* @ioc: per adapter object
- * @fw_event: The fw_event_work object
+ * @event_data: The fw event
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
- struct fw_event_work *fw_event)
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
struct MPT3SAS_TARGET *target_priv_data;
struct _sas_device *sas_device;
u64 sas_address;
unsigned long flags;
- Mpi2EventDataSasDeviceStatusChange_t *event_data =
- fw_event->event_data;
-
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
- if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
- _scsih_sas_device_status_change_event_debug(ioc,
- event_data);
-#endif
/* In MPI Revision K (0xC), the internal device reset complete was
* implemented, so avoid setting tm_busy flag for older firmware.
@@ -5269,37 +7805,758 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_address = le64_to_cpu(event_data->SASAddress);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_address);
+ sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+ sas_address,
+ mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
- if (!sas_device || !sas_device->starget) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device || !sas_device->starget)
+ goto out;
+
+ target_priv_data = sas_device->starget->hostdata;
+ if (!target_priv_data)
+ goto out;
+
+ if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ ioc_info(ioc,
+ "%s tm_busy flag for handle(0x%04x)\n",
+ (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
+ target_priv_data->handle);
+
+out:
+ if (sas_device)
+ sas_device_put(sas_device);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+
+/**
+ * _scsih_check_pcie_access_status - check access flags
+ * @ioc: per adapter object
+ * @wwid: wwid
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return: 0 for success, else failure
+ */
+static u8
+_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
+ case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
+ desc = "PCIe device capability failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
+ desc = "PCIe device blocked";
+ ioc_info(ioc,
+ "Device with Access Status (%s): wwid(0x%016llx), "
+ "handle(0x%04x) will only be added to the internal list\n",
+ desc, (u64)wwid, handle);
+ rc = 0;
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
+ desc = "PCIe device mem space access failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
+ desc = "PCIe device unsupported";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
+ desc = "PCIe device MSIx Required";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
+ desc = "PCIe device init fail max";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
+ desc = "PCIe device status unknown";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
+ desc = "nvme ready timeout";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
+ desc = "nvme device configuration unsupported";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
+ desc = "nvme identify failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
+ desc = "nvme qconfig failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
+ desc = "nvme qcreation failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
+ desc = "nvme eventcfg failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
+ desc = "nvme get feature stat failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
+ desc = "nvme idle timeout";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
+ desc = "nvme failure status";
+ break;
+ default:
+ ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+ access_status, (u64)wwid, handle);
+ return rc;
+ }
+
+ if (!rc)
+ return rc;
+
+ ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)wwid, handle);
+ return rc;
+}
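
For reference, here is a minimal standalone sketch of the accept/reject mapping the helper above performs; the enum values are hypothetical placeholders, not the real MPI26_PCIEDEV0_ASTATUS_* constants, and the kernel version also logs a per-status description.

#include <stdio.h>

/* Hypothetical stand-ins for the firmware access-status codes. */
enum access_status {
	ASTATUS_NO_ERRORS = 0,
	ASTATUS_NEEDS_INITIALIZATION = 1,
	ASTATUS_DEVICE_BLOCKED = 2,
	ASTATUS_CAPABILITY_FAILED = 3,
};

/* Return 0 when the device may be exposed, 1 when discovery failed. */
static int check_access_status(enum access_status status)
{
	switch (status) {
	case ASTATUS_NO_ERRORS:
	case ASTATUS_NEEDS_INITIALIZATION:
	case ASTATUS_DEVICE_BLOCKED:	/* kept on the internal list only */
		return 0;
	default:
		return 1;
	}
}

int main(void)
{
	printf("blocked -> rc %d\n", check_access_status(ASTATUS_DEVICE_BLOCKED));
	printf("cap failed -> rc %d\n", check_access_status(ASTATUS_CAPABILITY_FAILED));
	return 0;
}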
+
+/**
+ * _scsih_pcie_device_remove_from_sml - removing pcie device
+ * from SML and freeing up associated memory
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ */
+static void
+_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ if (pcie_device->starget && pcie_device->starget->hostdata) {
+ sas_target_priv_data = pcie_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ if (pcie_device->starget && (pcie_device->access_status !=
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
+ scsi_remove_target(&pcie_device->starget->dev);
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ kfree(pcie_device->serial_number);
+}
+
+
+/**
+ * _scsih_pcie_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @handle: attached device handle
+ */
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ u32 ioc_status;
+ struct _pcie_device *pcie_device;
+ u64 wwid;
+ unsigned long flags;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ if (!(_scsih_is_nvme_pciescsi_device(device_info)))
+ return;
+
+ wwid = le64_to_cpu(pcie_device_pg0.WWID);
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+ if (!pcie_device) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
return;
}
- target_priv_data = sas_device->starget->hostdata;
- if (!target_priv_data) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (unlikely(pcie_device->handle != handle)) {
+ starget = pcie_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ pcie_device->access_status = pcie_device_pg0.AccessStatus;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ pcie_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ pcie_device->handle = handle;
+
+ if (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+ pcie_device->enclosure_level =
+ pcie_device_pg0.EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0.ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+ }
+
+ /* check if device is present */
+ if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+ ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+ pcie_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
return;
}
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+
+ _scsih_ublock_io_device(ioc, wwid, NULL);
+
+ return;
+}
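
A small user-space sketch of the handle-reconciliation step above, assuming simplified stand-in structures (struct pcie_dev and struct target_priv are illustrative, not the driver's _pcie_device and MPT3SAS_TARGET): when firmware re-enumerates a device under a new handle, both the cached device object and the target private data are updated before I/O is unblocked.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, simplified stand-ins for the driver's device objects. */
struct target_priv {
	uint16_t handle;
};

struct pcie_dev {
	uint64_t wwid;
	uint16_t handle;
	struct target_priv *priv;
};

/* Reconcile a cached device with the handle firmware reports now. */
static void reconcile_handle(struct pcie_dev *dev, uint16_t fw_handle)
{
	if (dev->handle == fw_handle)
		return;
	printf("wwid 0x%016llx: handle changed from 0x%04x to 0x%04x\n",
	       (unsigned long long)dev->wwid,
	       (unsigned)dev->handle, (unsigned)fw_handle);
	dev->handle = fw_handle;
	if (dev->priv)
		dev->priv->handle = fw_handle;
}

int main(void)
{
	struct target_priv priv = { .handle = 0x0011 };
	struct pcie_dev dev = { .wwid = 0x5000c500deadbeefULL,
				.handle = 0x0011, .priv = &priv };

	reconcile_handle(&dev, 0x0023);	/* handle moved after re-enumeration */
	return 0;
}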
+
+/**
+ * _scsih_pcie_add_device - creating pcie device object
+ * @ioc: per adapter object
+ * @handle: pcie device handle
+ *
+ * Creating end device object, stored in ioc->pcie_device_list.
+ *
+ * Return: 1 means queue the event later, 0 means complete the event
+ */
+static int
+_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ Mpi26PCIeDevicePage2_t pcie_device_pg2;
+ Mpi2ConfigReply_t mpi_reply;
+ struct _pcie_device *pcie_device;
+ struct _enclosure_node *enclosure_dev;
+ u32 ioc_status;
+ u64 wwid;
+
+ if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return 0;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
+ set_bit(handle, ioc->pend_os_device_add);
+ wwid = le64_to_cpu(pcie_device_pg0.WWID);
+
+ /* check if device is present */
+ if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
+ return 0;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+ pcie_device_pg0.AccessStatus))
+ return 0;
+
+ if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
+ (pcie_device_pg0.DeviceInfo))))
+ return 0;
+
+ pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
+ if (pcie_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
+ pcie_device_put(pcie_device);
+ return 0;
+ }
+
+ /* PCIe Device Page 2 contains read-only information about a
+ * specific NVMe device; therefore, this page is only
+ * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
+ */
+ if (!(mpt3sas_scsih_is_pcie_scsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+ if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+ &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle)) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 0;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 0;
+ }
+ }
+
+ pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
+ if (!pcie_device) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
+ kref_init(&pcie_device->refcount);
+ pcie_device->id = ioc->pcie_target_id++;
+ pcie_device->channel = PCIE_CHANNEL;
+ pcie_device->handle = handle;
+ pcie_device->access_status = pcie_device_pg0.AccessStatus;
+ pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ pcie_device->wwid = wwid;
+ pcie_device->port_num = pcie_device_pg0.PortNum;
+ pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+ pcie_device->enclosure_handle =
+ le16_to_cpu(pcie_device_pg0.EnclosureHandle);
+ if (pcie_device->enclosure_handle != 0)
+ pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
+
+ if (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+ pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0.ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+
+ /* get enclosure_logical_id */
+ if (pcie_device->enclosure_handle) {
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ pcie_device->enclosure_handle);
+ if (enclosure_dev)
+ pcie_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ }
+ /* TODO -- Add device name once FW supports it */
+ if (!(mpt3sas_scsih_is_pcie_scsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+ pcie_device->nvme_mdts =
+ le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+ pcie_device->shutdown_latency =
+ le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+ /*
+ * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
+ * if drive's RTD3 Entry Latency is greater than IOC's
+ * max_shutdown_latency.
+ */
+ if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+ ioc->max_shutdown_latency =
+ pcie_device->shutdown_latency;
+ if (pcie_device_pg2.ControllerResetTO)
+ pcie_device->reset_timeout =
+ pcie_device_pg2.ControllerResetTO;
+ else
+ pcie_device->reset_timeout = 30;
+ } else
+ pcie_device->reset_timeout = 30;
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_pcie_device_init_add(ioc, pcie_device);
+ else
+ _scsih_pcie_device_add(ioc, pcie_device);
+
+ pcie_device_put(pcie_device);
+ return 0;
+}
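
The shutdown-latency and reset-timeout handling above can be summarized with the following standalone sketch; the structures are invented for illustration, and only the 30-second fallback is taken from the code above.

#include <stdio.h>
#include <stdint.h>

#define DEFAULT_RESET_TIMEOUT 30	/* seconds, fallback used above */

/* Hypothetical per-controller and per-device views of the latency data. */
struct controller {
	uint16_t max_shutdown_latency;
};

struct nvme_dev {
	uint16_t shutdown_latency;	/* drive's RTD3 Entry Latency */
	uint8_t  controller_reset_to;	/* 0 means "not reported" */
	uint8_t  reset_timeout;
};

static void absorb_latency(struct controller *ctrl, struct nvme_dev *dev)
{
	/* Controller-wide shutdown budget is the worst drive latency seen. */
	if (dev->shutdown_latency > ctrl->max_shutdown_latency)
		ctrl->max_shutdown_latency = dev->shutdown_latency;

	/* Honor the drive's reset timeout when reported, else default. */
	dev->reset_timeout = dev->controller_reset_to ?
			     dev->controller_reset_to : DEFAULT_RESET_TIMEOUT;
}

int main(void)
{
	struct controller ctrl = { .max_shutdown_latency = 6 };
	struct nvme_dev dev = { .shutdown_latency = 10, .controller_reset_to = 0 };

	absorb_latency(&ctrl, &dev);
	printf("max_shutdown_latency=%u reset_timeout=%u\n",
	       (unsigned)ctrl.max_shutdown_latency, (unsigned)dev.reset_timeout);
	return 0;
}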
+
+/**
+ * _scsih_pcie_topology_change_event_debug - debug for topology
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->SwitchStatus) {
+ case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
+ status_str = "add";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
+ pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
+ "start_port(%02d), count(%d)\n",
+ le16_to_cpu(event_data->SwitchDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPortNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ port_number = event_data->StartPortNum + i;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ switch (reason_code) {
+ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+ status_str = "target add";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PortEntry[i].CurrentPortInfo &
+ MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
+ MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
+ " link rate: new(0x%02x), old(0x%02x)\n", port_number,
+ handle, status_str, link_rate, prev_link_rate);
+ }
+}
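
A minimal sketch of how the per-port link-rate fields are decoded above, assuming a made-up RATE_MASK value in place of MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical mask; the driver uses MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK. */
#define RATE_MASK  0x0F

/* Extract the current and previous negotiated rates from one port entry. */
static void decode_port_info(uint8_t current_info, uint8_t previous_info)
{
	uint8_t link_rate = current_info & RATE_MASK;
	uint8_t prev_link_rate = previous_info & RATE_MASK;

	printf("link rate: new(0x%02x), old(0x%02x)%s\n",
	       (unsigned)link_rate, (unsigned)prev_link_rate,
	       link_rate == prev_link_rate ? " (no change)" : "");
}

int main(void)
{
	decode_port_info(0x43, 0x42);	/* rate changed */
	decode_port_info(0x43, 0x43);	/* rate unchanged */
	return 0;
}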
+
+/**
+ * _scsih_pcie_topology_change_event - handle PCIe topology
+ * changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static void
+_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 link_rate, prev_link_rate;
+ unsigned long flags;
+ int rc;
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data =
+ (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
+ struct _pcie_device *pcie_device;
+
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_pcie_topology_change_event_debug(ioc, event_data);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery)
+ return;
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
+ return;
+ }
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring switch event\n"));
+ return;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+
+ link_rate = event_data->PortEntry[i].CurrentPortInfo
+ & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
+ & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+
+ switch (reason_code) {
+ case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ if (ioc->shost_recovery)
+ break;
+ if (link_rate == prev_link_rate)
+ break;
+ if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+ break;
+
+ _scsih_pcie_check_device(ioc, handle);
+
+ /* The code after this point handles the case where a device
+ * has been added but keeps returning BUSY for some time. Then,
+ * before the Device Missing Delay expires and the device
+ * becomes READY, the device is removed and added back.
+ */
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ if (pcie_device) {
+ pcie_device_put(pcie_device);
+ break;
+ }
+
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
+ dewtprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+ handle));
+ event_data->PortEntry[i].PortStatus &= 0xF0;
+ event_data->PortEntry[i].PortStatus |=
+ MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+ fallthrough;
+ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+ if (ioc->shost_recovery)
+ break;
+ if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+ break;
+
+ rc = _scsih_pcie_add_device(ioc, handle);
+ if (!rc) {
+ /* mark entry vacant */
+ /* TODO: this needs to be reviewed and fixed;
+ * we don't have an entry status
+ * to mark an event void, i.e. vacant
+ */
+ event_data->PortEntry[i].PortStatus |=
+ MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
+ }
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ _scsih_pcie_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+}
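
The "convert event to a device add" step above rewrites only the reason-code nibble of PortStatus while keeping the upper bits. A standalone sketch of that bit manipulation, with hypothetical reason-code values, might look like this:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins; the driver uses the MPI26_EVENT_PCIE_TOPO_PS_* codes. */
#define PS_DEV_ADDED	0x01
#define PS_PORT_CHANGED	0x04

/* Rewrite the reason code in the low nibble, preserving the high nibble. */
static uint8_t convert_to_device_add(uint8_t port_status)
{
	port_status &= 0xF0;
	port_status |= PS_DEV_ADDED;
	return port_status;
}

int main(void)
{
	uint8_t status = 0x30 | PS_PORT_CHANGED;	/* flags + "port changed" */

	printf("before 0x%02x after 0x%02x\n",
	       (unsigned)status, (unsigned)convert_to_device_add(status));
	return 0;
}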
+
+/**
+ * _scsih_pcie_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
+ reason_str = "device init failure";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
+ reason_str = "pcie hot reset failed";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ ioc_info(ioc, "PCIE device status change: (%s)\n"
+ "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->WWID),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+ event_data->ASC, event_data->ASCQ);
+ pr_cont("\n");
+}
+
+/**
+ * _scsih_pcie_device_status_change_event - handle device status
+ * change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ */
+static void
+_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _pcie_device *pcie_device;
+ u64 wwid;
+ unsigned long flags;
+ Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
+ (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_pcie_device_status_change_event_debug(ioc,
+ event_data);
+
+ if (event_data->ReasonCode !=
+ MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ wwid = le64_to_cpu(event_data->WWID);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+ if (!pcie_device || !pcie_device->starget)
+ goto out;
+
+ target_priv_data = pcie_device->starget->hostdata;
+ if (!target_priv_data)
+ goto out;
+
if (event_data->ReasonCode ==
- MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
target_priv_data->tm_busy = 1;
else
target_priv_data->tm_busy = 0;
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+out:
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
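
A compact sketch of the tm_busy toggling above, with hypothetical reason-code names standing in for the MPI26_EVENT_PCIDEV_STAT_RC_* constants: task management is blocked while an internal device reset is in flight and re-enabled when the completion event arrives; other status changes are ignored.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical reason codes; the driver matches the MPI26 reset events. */
enum reason {
	RC_INTERNAL_DEVICE_RESET,
	RC_CMP_INTERNAL_DEV_RESET,
	RC_SMART_DATA,
};

/* Block task management on the target while a firmware reset is running. */
static void update_tm_busy(bool *tm_busy, enum reason rc)
{
	if (rc != RC_INTERNAL_DEVICE_RESET && rc != RC_CMP_INTERNAL_DEV_RESET)
		return;		/* other status changes don't touch tm_busy */
	*tm_busy = (rc == RC_INTERNAL_DEVICE_RESET);
}

int main(void)
{
	bool tm_busy = false;

	update_tm_busy(&tm_busy, RC_INTERNAL_DEVICE_RESET);
	printf("reset started: tm_busy=%d\n", tm_busy);
	update_tm_busy(&tm_busy, RC_CMP_INTERNAL_DEV_RESET);
	printf("reset complete: tm_busy=%d\n", tm_busy);
	return 0;
}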
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
* _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
* event
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -5319,32 +8576,77 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
- "\thandle(0x%04x), enclosure logical id(0x%016llx)"
- " number slots(%d)\n", ioc->name, reason_str,
- le16_to_cpu(event_data->EnclosureHandle),
- (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
- le16_to_cpu(event_data->StartSlot));
+ ioc_info(ioc, "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
}
-#endif
/**
* _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ Mpi2ConfigReply_t mpi_reply;
+ struct _enclosure_node *enclosure_dev = NULL;
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data =
+ (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
+ int rc;
+ u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
+
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ (Mpi2EventDataSasEnclDevStatusChange_t *)
fw_event->event_data);
-#endif
+ if (ioc->shost_recovery)
+ return;
+
+ if (enclosure_handle)
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ enclosure_handle);
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ if (!enclosure_dev) {
+ enclosure_dev =
+ kzalloc(sizeof(struct _enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev) {
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_dev->pg0,
+ MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ enclosure_handle);
+
+ if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK)) {
+ kfree(enclosure_dev);
+ return;
+ }
+
+ list_add_tail(&enclosure_dev->list,
+ &ioc->enclosure_list);
+ }
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ if (enclosure_dev) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+ break;
+ default:
+ break;
+ }
}
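
The enclosure handler above keeps a small cache of Enclosure Page 0 data, adding an entry on an "added" event if it is not already cached and dropping it when the enclosure stops responding. A toy user-space sketch of that bookkeeping, using a fixed-size array instead of the driver's linked list, is below.

#include <stdio.h>

/* Hypothetical reason codes and a tiny cache keyed by enclosure handle. */
enum { ENCL_ADDED, ENCL_NOT_RESPONDING };

#define MAX_ENCL 8
static unsigned short encl_cache[MAX_ENCL];	/* 0 = free slot */

static int find_slot(unsigned short handle)
{
	for (int i = 0; i < MAX_ENCL; i++)
		if (encl_cache[i] == handle)
			return i;
	return -1;
}

static void handle_enclosure_event(int reason, unsigned short handle)
{
	int i = find_slot(handle);

	if (reason == ENCL_ADDED && i < 0) {
		i = find_slot(0);		/* first free slot */
		if (i >= 0)
			encl_cache[i] = handle;	/* cache page-0 data here */
	} else if (reason == ENCL_NOT_RESPONDING && i >= 0) {
		encl_cache[i] = 0;		/* drop the stale entry */
	}
}

int main(void)
{
	handle_enclosure_event(ENCL_ADDED, 0x0009);
	handle_enclosure_event(ENCL_NOT_RESPONDING, 0x0009);
	printf("slot0=0x%04x\n", (unsigned)encl_cache[0]);
	return 0;
}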
/**
@@ -5352,8 +8654,6 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
@@ -5361,13 +8661,16 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
{
struct scsi_cmnd *scmd;
struct scsi_device *sdev;
+ struct scsiio_tracker *st;
u16 smid, handle;
u32 lun;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 termination_count;
u32 query_count;
Mpi2SCSITaskManagementReply_t *mpi_reply;
- Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ fw_event->event_data;
u16 ioc_status;
unsigned long flags;
int r;
@@ -5375,10 +8678,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
u8 task_abort_retries;
mutex_lock(&ioc->tm_cmds.mutex);
- pr_info(MPT3SAS_FMT
- "%s: enter: phy number(%d), width(%d)\n",
- ioc->name, __func__, event_data->PhyNum,
- event_data->PortWidth);
+ ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+ __func__, event_data->PhyNum, event_data->PortWidth);
_scsih_block_io_all_device(ioc);
@@ -5388,21 +8689,22 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
/* sanity checks for retrying this loop */
if (max_retries++ == 5) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
- ioc->name, __func__));
+ dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
goto out;
} else if (max_retries > 1)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
- ioc->name, __func__, max_retries - 1));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: %d retry\n",
+ __func__, max_retries - 1));
termination_count = 0;
query_count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
+ st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -5415,6 +8717,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (sas_device_priv_data->sas_target->flags &
MPT_TARGET_FLAGS_VOLUME)
continue;
+ /* skip PCIe devices */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_PCIE_DEVICE)
+ continue;
handle = sas_device_priv_data->sas_target->handle;
lun = sas_device_priv_data->lun;
@@ -5425,8 +8731,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
- TM_MUTEX_OFF);
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
+ st->msix_io, 30, 0);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -5455,9 +8761,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
task_abort_retries = 0;
tm_retry:
if (task_abort_retries++ == 60) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: ABORT_TASK: giving up\n", ioc->name,
- __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+ __func__));
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
goto broadcast_aen_retry;
}
@@ -5466,9 +8772,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out_no_lock;
r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd->serial_number, TM_MUTEX_OFF);
- if (r == FAILED) {
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30, 0);
+ if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
@@ -5486,9 +8792,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
}
if (ioc->broadcast_aen_pending) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: loop back due to pending AEN\n",
- ioc->name, __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc,
+ "%s: loop back due to pending AEN\n",
+ __func__));
ioc->broadcast_aen_pending = 0;
goto broadcast_aen_retry;
}
@@ -5497,9 +8804,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
out_no_lock:
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - exit, query_count = %d termination_count = %d\n",
- ioc->name, __func__, query_count, termination_count));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+ __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
if (!ioc->shost_recovery)
@@ -5512,26 +8819,23 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
- Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
- pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
- if (event_data->DiscoveryStatus)
- pr_info("discovery_status(0x%08x)",
- le32_to_cpu(event_data->DiscoveryStatus));
- pr_info("\n");
+ ioc_info(ioc, "discovery event: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_cont("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_cont("\n");
}
-#endif
if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
!ioc->sas_hba.num_phys) {
@@ -5545,12 +8849,70 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_sas_device_discovery_error_event - display SAS device discovery error
+ * events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ */
+static void
+_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
+ (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
+
+ switch (event_data->ReasonCode) {
+ case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
+ break;
+ case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * _scsih_pcie_enumeration_event - handle enumeration events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ */
+static void
+_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi26EventDataPCIeEnumeration_t *event_data =
+ (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+ (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "started" : "completed",
+ event_data->Flags);
+ if (event_data->EnumerationStatus)
+ pr_cont("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_cont("\n");
+}
+
+/**
* _scsih_ir_fastpath - turn on fastpath for IR physdisk
* @ioc: per adapter object
* @handle: device handle for physical disk
* @phys_disk_num: physical disk number
*
- * Return 0 for success, else failure.
+ * Return: 0 for success, else failure.
*/
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
@@ -5563,12 +8925,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
u16 ioc_status;
u32 log_info;
+ if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+ return rc;
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5576,8 +8939,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
rc = -EAGAIN;
goto out;
@@ -5591,19 +8953,18 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
mpi_request->PhysDiskNum = phys_disk_num;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
- "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
- handle, phys_disk_num));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+ handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
- if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
- issue_reset = 1;
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
rc = -EFAULT;
goto out;
}
@@ -5618,15 +8979,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
log_info = 0;
ioc_status &= MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: failed: ioc_status(0x%04x), "
- "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
- log_info));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+ ioc_status, log_info));
rc = -EFAULT;
} else
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: completed successfully\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
}
out:
@@ -5634,8 +8993,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mutex_unlock(&ioc->scsih_cmds.mutex);
if (issue_reset)
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return rc;
}
@@ -5648,11 +9006,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
- int rc;
sdev->no_uld_attach = no_uld_attach ? 1 : 0;
sdev_printk(KERN_INFO, sdev, "%s raid component\n",
- sdev->no_uld_attach ? "hidding" : "exposing");
- rc = scsi_device_reprobe(sdev);
+ sdev->no_uld_attach ? "hiding" : "exposing");
+ WARN_ON(scsi_device_reprobe(sdev));
}
/**
@@ -5660,8 +9017,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
@@ -5675,9 +9030,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -5690,9 +9044,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -5718,8 +9071,6 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @handle: volume device handle
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -5730,16 +9081,15 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct scsi_target *starget = NULL;
spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
if (raid_device) {
if (raid_device->starget) {
starget = raid_device->starget;
sas_target_priv_data = starget->hostdata;
sas_target_priv_data->deleted = 1;
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
list_del(&raid_device->list);
kfree(raid_device);
}
@@ -5753,8 +9103,6 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
@@ -5767,7 +9115,7 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device) {
sas_device->volume_handle = 0;
sas_device->volume_wwid = 0;
@@ -5786,6 +9134,8 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
/* exposing raid component */
if (starget)
starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+
+ sas_device_put(sas_device);
}
/**
@@ -5793,8 +9143,6 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
@@ -5814,7 +9162,7 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
&volume_wwid);
spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device) {
set_bit(handle, ioc->pd_handles);
if (sas_device->starget && sas_device->starget->hostdata) {
@@ -5832,8 +9180,11 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
/* hiding raid component */
_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+
if (starget)
starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+
+ sas_device_put(sas_device);
}
/**
@@ -5841,8 +9192,6 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
@@ -5858,15 +9207,12 @@ _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @element: IR config element data
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element)
{
struct _sas_device *sas_device;
- unsigned long flags;
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
@@ -5876,46 +9222,44 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
set_bit(handle, ioc->pd_handles);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
if (sas_device) {
_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ sas_device_put(sas_device);
return;
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc,
+ sas_device_pg0.PhysicalPort, 0));
_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
_scsih_add_device(ioc, handle, 0, 1);
}
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
* _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -5928,10 +9272,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
- pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
- ioc->name, (le32_to_cpu(event_data->Flags) &
- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
- "foreign" : "native", event_data->NumElements);
+ ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+ le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+ "foreign" : "native",
+ event_data->NumElements);
for (i = 0; i < event_data->NumElements; i++, element++) {
switch (element->ReasonCode) {
case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -5988,15 +9332,12 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
element->PhysDiskNum);
}
}
-#endif
/**
* _scsih_sas_ir_config_change_event - handle ir configuration change events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
@@ -6005,20 +9346,20 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element;
int i;
u8 foreign_config;
- Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrConfigChangeList_t *event_data =
+ (Mpi2EventDataIrConfigChangeList_t *)
+ fw_event->event_data;
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
- if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
+ (!ioc->hide_ir_msg))
_scsih_sas_ir_config_change_event_debug(ioc, event_data);
-#endif
-
foreign_config = (le32_to_cpu(event_data->Flags) &
MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
- if (ioc->shost_recovery) {
-
+ if (ioc->shost_recovery &&
+ ioc->hba_mpi_version_belonged != MPI2_VERSION) {
for (i = 0; i < event_data->NumElements; i++, element++) {
if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
_scsih_ir_fastpath(ioc,
@@ -6027,6 +9368,7 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
}
return;
}
+
for (i = 0; i < event_data->NumElements; i++, element++) {
switch (element->ReasonCode) {
@@ -6042,16 +9384,20 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
le16_to_cpu(element->VolDevHandle));
break;
case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
- _scsih_sas_pd_hide(ioc, element);
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_hide(ioc, element);
break;
case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
- _scsih_sas_pd_expose(ioc, element);
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_expose(ioc, element);
break;
case MPI2_EVENT_IR_CHANGE_RC_HIDE:
- _scsih_sas_pd_add(ioc, element);
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_add(ioc, element);
break;
case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
- _scsih_sas_pd_delete(ioc, element);
+ if (!ioc->is_warpdrive)
+ _scsih_sas_pd_delete(ioc, element);
break;
}
}
@@ -6062,8 +9408,6 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
@@ -6075,7 +9419,8 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
u16 handle;
u32 state;
int rc;
- Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrVolume_t *event_data =
+ (Mpi2EventDataIrVolume_t *) fw_event->event_data;
if (ioc->shost_recovery)
return;
@@ -6085,10 +9430,12 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
handle = le16_to_cpu(event_data->VolDevHandle);
state = le32_to_cpu(event_data->NewValue);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ if (!ioc->hide_ir_msg)
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_VOL_STATE_MISSING:
case MPI2_RAID_VOL_STATE_FAILED:
@@ -6100,7 +9447,7 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
case MPI2_RAID_VOL_STATE_OPTIMAL:
spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (raid_device)
@@ -6108,17 +9455,15 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
@@ -6144,8 +9489,6 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
@@ -6154,11 +9497,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
u16 handle, parent_handle;
u32 state;
struct _sas_device *sas_device;
- unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
u32 ioc_status;
- Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrPhysicalDisk_t *event_data =
+ (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
u64 sas_address;
if (ioc->shost_recovery)
@@ -6170,10 +9513,13 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
handle = le16_to_cpu(event_data->PhysDiskDevHandle);
state = le32_to_cpu(event_data->NewValue);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ if (!ioc->hide_ir_msg)
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
+
switch (state) {
case MPI2_RAID_PD_STATE_ONLINE:
case MPI2_RAID_PD_STATE_DEGRADED:
@@ -6181,34 +9527,37 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
case MPI2_RAID_PD_STATE_OPTIMAL:
case MPI2_RAID_PD_STATE_HOT_SPARE:
- set_bit(handle, ioc->pd_handles);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!ioc->is_warpdrive)
+ set_bit(handle, ioc->pd_handles);
- if (sas_device)
+ sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device_put(sas_device);
return;
+ }
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc,
+ sas_device_pg0.PhysicalPort, 0));
_scsih_add_device(ioc, handle, 0, 1);
@@ -6222,14 +9571,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
}
}
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
* _scsih_sas_ir_operation_status_event_debug - debug for IR op event
* @ioc: per adapter object
* @event_data: event data payload
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
@@ -6258,43 +9604,40 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
if (!reason_str)
return;
- pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
- "\thandle(0x%04x), percent complete(%d)\n",
- ioc->name, reason_str,
- le16_to_cpu(event_data->VolDevHandle),
- event_data->PercentComplete);
+ ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
}
-#endif
/**
* _scsih_sas_ir_operation_status_event - handle RAID operation events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
- Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrOperationStatus_t *event_data =
+ (Mpi2EventDataIrOperationStatus_t *)
+ fw_event->event_data;
static struct _raid_device *raid_device;
unsigned long flags;
u16 handle;
-#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
- if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
+ (!ioc->hide_ir_msg))
_scsih_sas_ir_operation_status_event_debug(ioc,
event_data);
-#endif
/* code added for raid transport support */
if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
spin_lock_irqsave(&ioc->raid_device_lock, flags);
handle = le16_to_cpu(event_data->VolDevHandle);
- raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
if (raid_device)
raid_device->percent_complete =
event_data->PercentComplete;
@@ -6323,69 +9666,190 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_update_device_qdepth - Update QD during Reset.
+ * @ioc: per adapter object
+ *
+ */
+static void
+_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ ioc_info(ioc, "Update devices with firmware reported queue depth\n");
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target) {
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ sas_device = sas_device_priv_data->sas_target->sas_dev;
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
+ qdepth = ioc->max_nvme_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ qdepth = (sas_device->port_type > 1) ?
+ ioc->max_wideport_qd : ioc->max_narrowport_qd;
+ else if (sas_device &&
+ sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ qdepth = ioc->max_sata_qd;
+ else
+ continue;
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+ }
+ }
+}
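
The queue-depth selection above can be condensed into the following standalone sketch; the limits structure and device classes are illustrative stand-ins for the ioc->max_*_qd fields and the device_info flags tested by the driver.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-adapter limits mirroring the fields used above. */
struct adapter_limits {
	uint16_t max_nvme_qd;
	uint16_t max_wideport_qd;
	uint16_t max_narrowport_qd;
	uint16_t max_sata_qd;
};

enum dev_class { DEV_NVME, DEV_SSP, DEV_SATA };

/* Pick the queue depth the device should get after a reset. */
static uint16_t pick_qdepth(const struct adapter_limits *lim,
			    enum dev_class class, int port_width)
{
	switch (class) {
	case DEV_NVME:
		return lim->max_nvme_qd;
	case DEV_SSP:
		return port_width > 1 ? lim->max_wideport_qd :
					lim->max_narrowport_qd;
	case DEV_SATA:
	default:
		return lim->max_sata_qd;
	}
}

int main(void)
{
	struct adapter_limits lim = { 128, 256, 64, 32 };

	printf("nvme=%u wide ssp=%u sata=%u\n",
	       (unsigned)pick_qdepth(&lim, DEV_NVME, 1),
	       (unsigned)pick_qdepth(&lim, DEV_SSP, 4),
	       (unsigned)pick_qdepth(&lim, DEV_SATA, 1));
	return 0;
}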
+
+/**
* _scsih_mark_responding_sas_device - mark a sas_devices as responding
* @ioc: per adapter object
- * @sas_address: sas address
- * @slot: enclosure slot id
- * @handle: device handle
+ * @sas_device_pg0: SAS Device page 0
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_sas_devices.
- *
- * Return nothing.
*/
static void
-_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u16 slot, u16 handle)
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
+Mpi2SasDevicePage0_t *sas_device_pg0)
{
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
+ struct _enclosure_node *enclosure_dev = NULL;
unsigned long flags;
+ struct hba_port *port = mpt3sas_get_port_by_id(
+ ioc, sas_device_pg0->PhysicalPort, 0);
+ if (sas_device_pg0->EnclosureHandle) {
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ le16_to_cpu(sas_device_pg0->EnclosureHandle));
+ if (enclosure_dev == NULL)
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0->EnclosureHandle);
+ }
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->sas_address == sas_address &&
- sas_device->slot == slot) {
- sas_device->responding = 1;
- starget = sas_device->starget;
- if (starget && starget->hostdata) {
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- sas_target_priv_data->deleted = 0;
- } else
- sas_target_priv_data = NULL;
- if (starget)
+ if (sas_device->sas_address != le64_to_cpu(
+ sas_device_pg0->SASAddress))
+ continue;
+ if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
+ continue;
+ if (sas_device->port != port)
+ continue;
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx)\n",
+ le16_to_cpu(sas_device_pg0->DevHandle),
+ (unsigned long long)
+ sas_device->sas_address);
+
+ if (sas_device->enclosure_handle != 0)
starget_printk(KERN_INFO, starget,
- "handle(0x%04x), sas_addr(0x%016llx), "
- "enclosure logical id(0x%016llx), "
- "slot(%d)\n", handle,
- (unsigned long long)sas_device->sas_address,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->handle == handle)
- goto out;
- pr_info("\thandle changed from(0x%04x)!!!\n",
- sas_device->handle);
- sas_device->handle = handle;
- if (sas_target_priv_data)
- sas_target_priv_data->handle = handle;
- goto out;
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
}
+ if (le16_to_cpu(sas_device_pg0->Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+ sas_device->enclosure_level =
+ sas_device_pg0->EnclosureLevel;
+ memcpy(&sas_device->connector_name[0],
+ &sas_device_pg0->ConnectorName[0], 4);
+ } else {
+ sas_device->enclosure_level = 0;
+ sas_device->connector_name[0] = '\0';
+ }
+
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0->EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+ if (enclosure_dev) {
+ sas_device->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_dev->pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_dev->pg0.ChassisSlot;
+ }
+ }
+
+ if (sas_device->handle == le16_to_cpu(
+ sas_device_pg0->DevHandle))
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = le16_to_cpu(
+ sas_device_pg0->DevHandle);
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle =
+ le16_to_cpu(sas_device_pg0->DevHandle);
+ goto out;
}
out:
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
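
The matching loop above compares little-endian fields from SAS Device Page 0
(SASAddress, Slot, DevHandle, EnclosureHandle) against cached host-order
values through le16_to_cpu()/le64_to_cpu(). A minimal userspace sketch of
the same byte-order handling follows; read_le16()/read_le64() are
hypothetical helpers standing in for the kernel accessors, and the sample
buffer is made up.

#include <stdint.h>
#include <stdio.h>

/* Shift-based little-endian readers: correct on any host endianness. */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint64_t read_le64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* Pretend this is a little-endian slice of a firmware config page:
	 * a 16-bit DevHandle followed by a 64-bit SASAddress. */
	const uint8_t page[10] = {
		0x34, 0x12,
		0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01,
	};

	printf("handle(0x%04x), sas_addr(0x%016llx)\n",
	       (unsigned int)read_le16(&page[0]),
	       (unsigned long long)read_le64(&page[2]));
	return 0;
}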
/**
+ * _scsih_create_enclosure_list_after_reset - free the existing enclosure
+ * list and rebuild it by scanning all Enclosure Page(0)s
+ * @ioc: per adapter object
+ */
+static void
+_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _enclosure_node *enclosure_dev;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 enclosure_handle;
+ int rc;
+
+ /* Free existing enclosure list */
+ mpt3sas_free_enclosure_list(ioc);
+
+ /* Reconstruct the enclosure list after reset */
+ enclosure_handle = 0xFFFF;
+ do {
+ enclosure_dev =
+ kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
+ if (!enclosure_dev) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+ rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_dev->pg0,
+ MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
+ enclosure_handle);
+
+ if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK)) {
+ kfree(enclosure_dev);
+ return;
+ }
+ list_add_tail(&enclosure_dev->list,
+ &ioc->enclosure_list);
+ enclosure_handle =
+ le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
+ } while (1);
+}
+
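
The do/while loop above, like the "search for responding ..." routines that
follow, walks firmware config pages with a GET_NEXT_HANDLE style query:
start from the 0xFFFF sentinel, request the page following the last handle
seen, and stop at the first non-success status. Here is a compact userspace
sketch of that iteration pattern; get_next_pg0() and its handle table are
made-up stand-ins for the real config request.

#include <stdint.h>
#include <stdio.h>

struct fake_pg0 {
	uint16_t handle;
};

/* Hypothetical "get next handle" request: fills *out with the first entry
 * whose handle is greater than prev (0xFFFF means "start over") and
 * returns 0, or returns -1 when there are no more pages. */
static int get_next_pg0(uint16_t prev, struct fake_pg0 *out)
{
	static const uint16_t handles[] = { 0x0009, 0x000a, 0x0011 };
	uint16_t floor_handle = (prev == 0xFFFF) ? 0 : prev;

	for (size_t i = 0; i < sizeof(handles) / sizeof(handles[0]); i++) {
		if (handles[i] > floor_handle) {
			out->handle = handles[i];
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct fake_pg0 pg0;
	uint16_t handle = 0xFFFF;	/* sentinel: begin the walk */

	while (!get_next_pg0(handle, &pg0)) {
		handle = pg0.handle;	/* feed the last handle back in */
		printf("found handle(0x%04x)\n", (unsigned int)handle);
	}
	return 0;
}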
+/**
+ * _scsih_search_responding_sas_devices - search for responding SAS end devices
* @ioc: per adapter object
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -6396,7 +9860,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+ ioc_info(ioc, "search for end-devices: start\n");
if (list_empty(&ioc->sas_device_list))
goto out;
@@ -6413,14 +9877,130 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
if (!(_scsih_is_end_device(device_info)))
continue;
- _scsih_mark_responding_sas_device(ioc,
- le64_to_cpu(sas_device_pg0.SASAddress),
- le16_to_cpu(sas_device_pg0.Slot), handle);
+ _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
+ }
+
+ out:
+ ioc_info(ioc, "search for end-devices: complete\n");
+}
+
+/**
+ * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
+ * @ioc: per adapter object
+ * @pcie_device_pg0: PCIe Device page 0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
+ */
+static void
+_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26PCIeDevicePage0_t *pcie_device_pg0)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
+ && (pcie_device->slot == le16_to_cpu(
+ pcie_device_pg0->Slot))) {
+ pcie_device->access_status =
+ pcie_device_pg0->AccessStatus;
+ pcie_device->responding = 1;
+ starget = pcie_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), wwid(0x%016llx) ",
+ pcie_device->handle,
+ (unsigned long long)pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n",
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ }
+
+ if (((le32_to_cpu(pcie_device_pg0->Flags)) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
+ (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
+ pcie_device->enclosure_level =
+ pcie_device_pg0->EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0->ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+
+ if (pcie_device->handle == le16_to_cpu(
+ pcie_device_pg0->DevHandle))
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ pcie_device->handle);
+ pcie_device->handle = le16_to_cpu(
+ pcie_device_pg0->DevHandle);
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle =
+ le16_to_cpu(pcie_device_pg0->DevHandle);
+ goto out;
+ }
}
out:
- pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
- ioc->name);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_pcie_devices - search for responding PCIe end devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ */
+static void
+_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ ioc_info(ioc, "search for end-devices: start\n");
+
+ if (list_empty(&ioc->pcie_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ if (!(_scsih_is_nvme_pciescsi_device(device_info)))
+ continue;
+ _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
+ }
+out:
+ ioc_info(ioc, "search for PCIe end-devices: complete\n");
}
/**
@@ -6431,14 +10011,12 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_raid_devices.
- *
- * Return nothing.
*/
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
u16 handle)
{
- struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
struct _raid_device *raid_device;
unsigned long flags;
@@ -6457,6 +10035,13 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
(unsigned long long)raid_device->wwid);
+
+ /*
+ * WARPDRIVE: The handles of the PDs might have changed
+ * across the host reset so re-initialize the
+ * required data for Direct IO
+ */
+ mpt3sas_init_warpdrive_properties(ioc, raid_device);
spin_lock_irqsave(&ioc->raid_device_lock, flags);
if (raid_device->handle == handle) {
spin_unlock_irqrestore(&ioc->raid_device_lock,
@@ -6481,8 +10066,6 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
@@ -6498,8 +10081,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->ir_firmware)
return;
- pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
- ioc->name);
+ ioc_info(ioc, "search for raid volumes: start\n");
if (list_empty(&ioc->raid_device_list))
goto out;
@@ -6526,6 +10108,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
}
/* refresh the pd_handles */
+ if (!ioc->is_warpdrive) {
phys_disk_num = 0xFF;
memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
@@ -6539,35 +10122,53 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
handle = le16_to_cpu(pd_pg0.DevHandle);
set_bit(handle, ioc->pd_handles);
}
+ }
out:
- pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for responding raid volumes: complete\n");
}
/**
 * _scsih_mark_responding_expander - mark an expander as responding
* @ioc: per adapter object
- * @sas_address: sas address
- * @handle:
+ * @expander_pg0: SAS Expander Config Page 0
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_expanders.
- *
- * Return nothing.
*/
static void
-_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u16 handle)
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ExpanderPage0_t *expander_pg0)
{
- struct _sas_node *sas_expander;
+ struct _sas_node *sas_expander = NULL;
unsigned long flags;
int i;
+ struct _enclosure_node *enclosure_dev = NULL;
+ u16 handle = le16_to_cpu(expander_pg0->DevHandle);
+ u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
+ u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
+ struct hba_port *port = mpt3sas_get_port_by_id(
+ ioc, expander_pg0->PhysicalPort, 0);
+
+ if (enclosure_handle)
+ enclosure_dev =
+ mpt3sas_scsih_enclosure_find_by_handle(ioc,
+ enclosure_handle);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
+ if (sas_expander->port != port)
+ continue;
sas_expander->responding = 1;
+
+ if (enclosure_dev) {
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
+ sas_expander->enclosure_handle =
+ le16_to_cpu(expander_pg0->EnclosureHandle);
+ }
+
if (sas_expander->handle == handle)
goto out;
pr_info("\texpander(0x%016llx): handle changed" \
@@ -6589,8 +10190,6 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
*
* After host reset, find out whether devices are still responding.
* If not remove.
- *
- * Return nothing.
*/
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
@@ -6600,8 +10199,9 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u64 sas_address;
u16 handle;
+ u8 port;
- pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+ ioc_info(ioc, "search for expanders: start\n");
if (list_empty(&ioc->sas_expander_list))
goto out;
@@ -6617,50 +10217,103 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
handle = le16_to_cpu(expander_pg0.DevHandle);
sas_address = le64_to_cpu(expander_pg0.SASAddress);
- pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
- handle,
- (unsigned long long)sas_address);
- _scsih_mark_responding_expander(ioc, sas_address, handle);
+ port = expander_pg0.PhysicalPort;
+ pr_info(
+ "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ handle, (unsigned long long)sas_address,
+ (ioc->multipath_on_hba ?
+ port : MULTIPATH_DISABLED_PORT_ID));
+ _scsih_mark_responding_expander(ioc, &expander_pg0);
}
out:
- pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+ ioc_info(ioc, "search for expanders: complete\n");
}
/**
- * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * _scsih_remove_unresponding_devices - removing unresponding devices
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
-_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander, *sas_expander_next;
struct _raid_device *raid_device, *raid_device_next;
+ struct _pcie_device *pcie_device, *pcie_device_next;
struct list_head tmp_list;
unsigned long flags;
+ LIST_HEAD(head);
- pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: start\n");
/* removing unresponding end devices */
- pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: end-devices\n");
+ /*
+ * Iterate, pulling off devices marked as non-responding. We become the
+ * owner for the reference the list had on any object we prune.
+ */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+ /*
+ * Clean up the sas_device_init_list, since the driver
+ * performs a fresh scan as part of the diag reset.
+ */
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_init_list, list) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
if (!sas_device->responding)
- mpt3sas_device_remove_by_sas_address(ioc,
- sas_device->sas_address);
+ list_move_tail(&sas_device->list, &head);
else
sas_device->responding = 0;
}
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ /*
+ * Now, uninitialize and remove the unresponding devices we pruned.
+ */
+ list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
+ _scsih_remove_device(ioc, sas_device);
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
+ ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
+ INIT_LIST_HEAD(&head);
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ /*
+ * Clean up the pcie_device_init_list, since the driver
+ * performs a fresh scan as part of the diag reset.
+ */
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_init_list, list) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_list, list) {
+ if (!pcie_device->responding)
+ list_move_tail(&pcie_device->list, &head);
+ else
+ pcie_device->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
/* removing unresponding volumes */
if (ioc->ir_firmware) {
- pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: volumes\n");
list_for_each_entry_safe(raid_device, raid_device_next,
&ioc->raid_device_list, list) {
if (!raid_device->responding)
@@ -6672,8 +10325,7 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
}
/* removing unresponding expanders */
- pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: expanders\n");
spin_lock_irqsave(&ioc->sas_node_lock, flags);
INIT_LIST_HEAD(&tmp_list);
list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -6686,12 +10338,10 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
list) {
- list_del(&sas_expander->list);
_scsih_expander_node_remove(ioc, sas_expander);
}
- pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: complete\n");
/* unblock devices */
_scsih_ublock_io_all_device(ioc);
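
The end-device and PCIe pruning above is a two-phase pattern: while the list
spinlock is held, entries whose responding flag is clear are moved onto a
private head list (the reference the shared list held travels with them);
only after the lock is dropped are they torn down and their references put.
The sketch below is a simplified userspace analogue of that pattern, with a
mutex standing in for the spinlock and a hand-rolled refcount; it is
illustrative only and not the driver's code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	struct dev *next;
	int refcount;		/* the shared list owns one reference */
	int responding;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *dev_list;

static void dev_put(struct dev *d)
{
	if (--d->refcount == 0) {
		printf("freeing dev %d\n", d->id);
		free(d);
	}
}

static void prune_unresponding(void)
{
	struct dev **pp, *d, *pruned = NULL;

	/* Phase 1: under the lock, detach non-responding entries; the
	 * reference the shared list held now belongs to 'pruned'. */
	pthread_mutex_lock(&list_lock);
	for (pp = &dev_list; (d = *pp) != NULL; ) {
		if (!d->responding) {
			*pp = d->next;
			d->next = pruned;
			pruned = d;
		} else {
			d->responding = 0;	/* re-arm for the next scan */
			pp = &d->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* Phase 2: the lock is dropped, so the (potentially slow) teardown
	 * and the final reference drop happen outside the critical section. */
	while ((d = pruned) != NULL) {
		pruned = d->next;
		dev_put(d);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct dev *d = calloc(1, sizeof(*d));

		if (!d)
			return 1;
		d->refcount = 1;		/* owned by dev_list */
		d->id = i;
		d->responding = (i == 1);	/* only dev 1 responded */
		d->next = dev_list;
		dev_list = d;
	}
	prune_unresponding();	/* devs 0 and 2 are freed; dev 1 stays listed */
	return 0;
}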
@@ -6708,48 +10358,49 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
le16_to_cpu(expander_pg1.AttachedDevHandle), i,
- expander_pg1.NegotiatedLinkRate >> 4);
+ expander_pg1.NegotiatedLinkRate >> 4,
+ sas_expander->port);
}
}
/**
* _scsih_scan_for_devices_after_reset - scan for devices after host reset
* @ioc: per adapter object
- *
- * Return nothing.
*/
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ExpanderPage0_t expander_pg0;
Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
Mpi2RaidVolPage1_t volume_pg1;
Mpi2RaidVolPage0_t volume_pg0;
Mpi2RaidPhysDiskPage0_t pd_pg0;
Mpi2EventIrConfigElement_t element;
Mpi2ConfigReply_t mpi_reply;
- u8 phys_disk_num;
+ u8 phys_disk_num, port_id;
u16 ioc_status;
u16 handle, parent_handle;
u64 sas_address;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _sas_node *expander_device;
static struct _raid_device *raid_device;
u8 retry_count;
unsigned long flags;
- pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+ ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
- pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders start\n");
/* expanders */
handle = 0xFFFF;
@@ -6758,40 +10409,37 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(expander_pg0.DevHandle);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ port_id = expander_pg0.PhysicalPort;
expander_device = mpt3sas_scsih_expander_find_by_sas_address(
- ioc, le64_to_cpu(expander_pg0.SASAddress));
+ ioc, le64_to_cpu(expander_pg0.SASAddress),
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (expander_device)
_scsih_refresh_expander_links(ioc, expander_device,
handle);
else {
- pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
_scsih_expander_add(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders complete\n");
if (!ioc->ir_firmware)
goto skip_to_sas;
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk start\n");
/* phys disk */
phys_disk_num = 0xFF;
@@ -6801,19 +10449,17 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
phys_disk_num = pd_pg0.PhysDiskNum;
handle = le16_to_cpu(pd_pg0.DevHandle);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device_put(sas_device);
continue;
+ }
if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
handle) != 0)
@@ -6821,22 +10467,21 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle,
&sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
+ port_id = sas_device_pg0.PhysicalPort;
mpt3sas_transport_update_links(ioc, sas_address,
handle, sas_device_pg0.PhyNum,
- MPI2_SAS_NEG_LINK_RATE_1_5);
+ MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
set_bit(handle, ioc->pd_handles);
retry_count = 0;
/* This will retry adding the end device.
@@ -6847,17 +10492,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk complete\n");
- pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes start\n");
/* volumes */
handle = 0xFFFF;
@@ -6866,10 +10509,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -6886,10 +10527,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -6898,23 +10537,19 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
element.VolDevHandle = volume_pg1.DevHandle;
- pr_info(MPT3SAS_FMT
- "\tBEFORE adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
_scsih_sas_volume_add(ioc, &element);
- pr_info(MPT3SAS_FMT
- "\tAFTER adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes complete\n");
skip_to_sas:
- pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices start\n");
/* sas devices */
handle = 0xFFFF;
@@ -6924,30 +10559,30 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
- " ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(sas_device_pg0.DevHandle);
if (!(_scsih_is_end_device(
le32_to_cpu(sas_device_pg0.DeviceInfo))))
continue;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
- le64_to_cpu(sas_device_pg0.SASAddress));
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ port_id = sas_device_pg0.PhysicalPort;
+ sas_device = mpt3sas_get_sdev_by_addr(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
+ if (sas_device) {
+ sas_device_put(sas_device);
continue;
+ }
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
+ mpt3sas_get_port_by_id(ioc, port_id, 0));
retry_count = 0;
/* This will retry adding the end device.
* _scsih_add_device() will decide on retries and
@@ -6957,66 +10592,111 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
0)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+ ioc_info(ioc, "\tscan devices: end devices complete\n");
+ ioc_info(ioc, "\tscan devices: pcie end devices start\n");
+
+ /* pcie devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+ if (!(_scsih_is_nvme_pciescsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ continue;
+ pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
+ le64_to_cpu(pcie_device_pg0.WWID));
+ if (pcie_device) {
+ pcie_device_put(pcie_device);
+ continue;
}
+ retry_count = 0;
+ parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
+ _scsih_pcie_add_device(ioc, handle);
+
+ ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
- pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
+ ioc_info(ioc, "scan devices: complete\n");
}
+
/**
- * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
* @ioc: per adapter object
- * @reset_phase: phase
*
* The handler for doing any required cleanup or initialization.
+ */
+void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
+}
+
+/**
+ * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
+ * scsi & tm cmds.
+ * @ioc: per adapter object
*
- * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
- * MPT3_IOC_DONE_RESET
- *
- * Return nothing.
+ * The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
- switch (reset_phase) {
- case MPT3_IOC_PRE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
- break;
- case MPT3_IOC_AFTER_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
- if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
- ioc->scsih_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
- complete(&ioc->scsih_cmds.done);
- }
- if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
- ioc->tm_cmds.status |= MPT3_CMD_RESET;
- mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
- complete(&ioc->tm_cmds.done);
- }
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
- _scsih_fw_event_cleanup_queue(ioc);
- _scsih_flush_running_cmds(ioc);
- break;
- case MPT3_IOC_DONE_RESET:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
- _scsih_prep_device_scan(ioc);
- _scsih_search_responding_sas_devices(ioc);
- _scsih_search_responding_raid_devices(ioc);
- _scsih_search_responding_expanders(ioc);
- _scsih_error_recovery_delete_devices(ioc);
+ memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+ memset(ioc->device_remove_in_progress, 0,
+ ioc->device_remove_in_progress_sz);
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+}
+
+/**
+ * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ *
+ * The handler for doing any required cleanup or initialization.
+ */
+void
+mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
+{
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
+ if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
+ if (ioc->multipath_on_hba) {
+ _scsih_sas_port_refresh(ioc);
+ _scsih_update_vphys_after_reset(ioc);
}
- break;
+ _scsih_prep_device_scan(ioc);
+ _scsih_create_enclosure_list_after_reset(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_pcie_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
}
}
@@ -7025,50 +10705,83 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
- *
- * Return nothing.
*/
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
+ ioc->current_event = fw_event;
+ _scsih_fw_event_del_from_list(ioc, fw_event);
+
/* the queue is being flushed so ignore this event */
- if (ioc->remove_host || fw_event->cancel_pending_work ||
- ioc->pci_error_recovery) {
- _scsih_fw_event_free(ioc, fw_event);
+ if (ioc->remove_host || ioc->pci_error_recovery) {
+ fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
return;
}
switch (fw_event->event) {
case MPT3SAS_PROCESS_TRIGGER_DIAG:
- mpt3sas_process_trigger_data(ioc, fw_event->event_data);
+ mpt3sas_process_trigger_data(ioc,
+ (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
+ fw_event->event_data);
break;
case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
- while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ while (scsi_host_in_recovery(ioc->shost) ||
+ ioc->shost_recovery) {
+ /*
+ * If we're unloading or cancelling the work, bail.
+ * Otherwise, this can become an infinite loop.
+ */
+ if (ioc->remove_host || ioc->fw_events_cleanup)
+ goto out;
ssleep(1);
- _scsih_remove_unresponding_sas_devices(ioc);
+ }
+ _scsih_remove_unresponding_devices(ioc);
+ _scsih_del_dirty_vphy(ioc);
+ _scsih_del_dirty_port_entries(ioc);
+ if (ioc->is_gen35_ioc)
+ _scsih_update_device_qdepth(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ /*
+ * If a diag reset occurred while the driver was loading, the
+ * driver has to finish the load operation itself by:
+ * - registering the devices from sas_device_init_list with the SML,
+ * - clearing the is_driver_loading flag,
+ * - starting the watchdog thread.
+ * On the normal load path these steps are taken care of when the
+ * driver executes scsih_scan_finished().
+ */
+ if (ioc->is_driver_loading)
+ _scsih_complete_devices_scanning(ioc);
+ _scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "port enable: complete from worker thread\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "port enable: complete from worker thread\n"));
break;
- case MPT3SAS_TURN_ON_FAULT_LED:
- _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ case MPT3SAS_TURN_ON_PFA_LED:
+ _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
break;
case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
_scsih_sas_topology_change_event(ioc, fw_event);
break;
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
- _scsih_sas_device_status_change_event(ioc, fw_event);
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data);
break;
case MPI2_EVENT_SAS_DISCOVERY:
_scsih_sas_discovery_event(ioc, fw_event);
break;
+ case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ _scsih_sas_device_discovery_error_event(ioc, fw_event);
+ break;
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
_scsih_sas_broadcast_primitive_event(ioc, fw_event);
break;
@@ -7088,19 +10801,27 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_IR_OPERATION_STATUS:
_scsih_sas_ir_operation_status_event(ioc, fw_event);
break;
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
+ _scsih_pcie_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ _scsih_pcie_enumeration_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ _scsih_pcie_topology_change_event(ioc, fw_event);
+ break;
}
- _scsih_fw_event_free(ioc, fw_event);
+out:
+ fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
}
/**
* _firmware_event_work
- * @ioc: per adapter object
* @work: The fw_event_work object
* Context: user.
*
* wrappers for the work thread handling firmware events
- *
- * Return nothing.
*/
static void
@@ -7122,8 +10843,8 @@ _firmware_event_work(struct work_struct *work)
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Return: 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
*/
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
@@ -7133,16 +10854,17 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
Mpi2EventNotificationReply_t *mpi_reply;
u16 event;
u16 sz;
+ Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
- /* events turned off due to host reset or driver unloading */
- if (ioc->remove_host || ioc->pci_error_recovery)
+ /* events turned off due to host reset */
+ if (ioc->pci_error_recovery)
return 1;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -7175,6 +10897,23 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
_scsih_check_topo_delete_events(ioc,
(Mpi2EventDataSasTopologyChangeList_t *)
mpi_reply->EventData);
+ /*
+ * No need to add the topology change list
+ * event to the fw event work queue while a
+ * diag reset is going on, since during diag
+ * reset the driver scans the devices by reading
+ * SAS device page 0, not by processing the
+ * events.
+ */
+ if (ioc->shost_recovery)
+ return 1;
+ break;
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_pcie_topo_remove_events(ioc,
+ (Mpi26EventDataPCIeTopologyChangeList_t *)
+ mpi_reply->EventData);
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
_scsih_check_ir_config_unhide_events(ioc,
@@ -7186,30 +10925,91 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi2EventDataIrVolume_t *)
mpi_reply->EventData);
break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ {
+ Mpi2EventDataLogEntryAdded_t *log_entry;
+ u32 log_code;
+
+ if (!ioc->is_warpdrive)
+ break;
+
+ log_entry = (Mpi2EventDataLogEntryAdded_t *)
+ mpi_reply->EventData;
+ log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
+
+ if (le16_to_cpu(log_entry->LogEntryQualifier)
+ != MPT2_WARPDRIVE_LOGENTRY)
+ break;
+ switch (log_code) {
+ case MPT2_WARPDRIVE_LC_SSDT:
+ ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
+ break;
+ case MPT2_WARPDRIVE_LC_SSDLW:
+ ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
+ break;
+ case MPT2_WARPDRIVE_LC_SSDLF:
+ ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
+ break;
+ case MPT2_WARPDRIVE_LC_BRMF:
+ ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
+ break;
+ }
+
+ break;
+ }
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc,
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ mpi_reply->EventData);
+ break;
case MPI2_EVENT_IR_OPERATION_STATUS:
case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_PHYSICAL_DISK:
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
+ break;
+
+ case MPI2_EVENT_TEMP_THRESHOLD:
+ _scsih_temp_threshold_events(ioc,
+ (Mpi2EventDataTemperature_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
+ ActiveCableEventData =
+ (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
+ switch (ActiveCableEventData->ReasonCode) {
+ case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
+ ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
+ pr_notice("cannot be powered and devices connected\n");
+ pr_notice("to this active cable will not be seen\n");
+ pr_notice("This active cable requires %d mW of power\n",
+ le32_to_cpu(
+ ActiveCableEventData->ActiveCablePowerRequirement));
+ break;
+
+ case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
+ ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
+ pr_notice(
+ "is not running at optimal speed(12 Gb/s rate)\n");
+ break;
+ }
+
break;
default: /* ignore the rest */
return 1;
}
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
- if (!fw_event) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return 1;
- }
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
- fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
- if (!fw_event->event_data) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- kfree(fw_event);
+ fw_event = alloc_fw_event_work(sz);
+ if (!fw_event) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -7219,55 +11019,25 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
fw_event->VP_ID = mpi_reply->VP_ID;
fw_event->event = event;
_scsih_fw_event_add(ioc, fw_event);
+ fw_event_work_put(fw_event);
return 1;
}
-/* shost template */
-static struct scsi_host_template scsih_driver_template = {
- .module = THIS_MODULE,
- .name = "Fusion MPT SAS Host",
- .proc_name = MPT3SAS_DRIVER_NAME,
- .queuecommand = _scsih_qcmd,
- .target_alloc = _scsih_target_alloc,
- .slave_alloc = _scsih_slave_alloc,
- .slave_configure = _scsih_slave_configure,
- .target_destroy = _scsih_target_destroy,
- .slave_destroy = _scsih_slave_destroy,
- .scan_finished = _scsih_scan_finished,
- .scan_start = _scsih_scan_start,
- .change_queue_depth = _scsih_change_queue_depth,
- .change_queue_type = _scsih_change_queue_type,
- .eh_abort_handler = _scsih_abort,
- .eh_device_reset_handler = _scsih_dev_reset,
- .eh_target_reset_handler = _scsih_target_reset,
- .eh_host_reset_handler = _scsih_host_reset,
- .bios_param = _scsih_bios_param,
- .can_queue = 1,
- .this_id = -1,
- .sg_tablesize = MPT3SAS_SG_DEPTH,
- .max_sectors = 32767,
- .cmd_per_lun = 7,
- .use_clustering = ENABLE_CLUSTERING,
- .shost_attrs = mpt3sas_host_attrs,
- .sdev_attrs = mpt3sas_dev_attrs,
-};
-
/**
* _scsih_expander_node_remove - removing expander device from list.
* @ioc: per adapter object
* @sas_expander: the sas_device object
- * Context: Calling function should acquire ioc->sas_node_lock.
*
* Removing object and freeing associated memory from the
* ioc->sas_expander_list.
- *
- * Return nothing.
*/
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_expander)
{
struct _sas_port *mpt3sas_port, *next;
+ unsigned long flags;
+ int port_id;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -7277,36 +11047,111 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
mpt3sas_device_remove_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
else if (mpt3sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
mpt3sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE)
mpt3sas_expander_remove(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
}
+ port_id = sas_expander->port->port_id;
+
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
- sas_expander->sas_address_parent);
+ sas_expander->sas_address_parent, sas_expander->port);
- pr_info(MPT3SAS_FMT
- "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name,
+ ioc_info(ioc,
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
sas_expander->handle, (unsigned long long)
- sas_expander->sas_address);
+ sas_expander->sas_address,
+ port_id);
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
kfree(sas_expander->phy);
kfree(sas_expander);
}
/**
+ * _scsih_nvme_shutdown - NVMe shutdown notification
+ * @ioc: per adapter object
+ *
+ * Send an IoUnitControl request with the shutdown operation code to alert the
+ * IOC that the host system is shutting down, so that the IOC can issue an NVMe
+ * shutdown to the NVMe drives attached to it.
+ */
+static void
+_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+
+ /* are there any NVMe devices ? */
+ if (list_empty(&ioc->pcie_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
+ goto out;
+ }
+
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc,
+ "%s: failed obtaining a smid\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
+
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ /* Wait for max_shutdown_latency seconds */
+ ioc_info(ioc,
+ "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
+ ioc->max_shutdown_latency);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ ioc->max_shutdown_latency*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ ioc_err(ioc, "%s: timeout\n", __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_info(ioc, "Io Unit Control shutdown (complete): "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+
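
_scsih_nvme_shutdown() above follows the driver's usual fire-and-wait shape:
mark the internal command pending, post the request, then block in
wait_for_completion_timeout() for up to max_shutdown_latency seconds. A
rough userspace analogue of "wait for a flag with a deadline", built on a
condition variable, is sketched below; the function names, the 6 second
latency and the reply thread are illustrative assumptions, not driver code.

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;		/* set by the "reply" path */

/* Returns 0 when the completion fired, -ETIMEDOUT otherwise. */
static int wait_for_done(unsigned int timeout_sec)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!done && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	return done ? 0 : -ETIMEDOUT;
}

static void *reply_thread(void *arg)
{
	(void)arg;
	/* pretend the controller answered after doing its work */
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reply_thread, NULL);
	if (wait_for_done(6))		/* e.g. a 6 second shutdown latency */
		fprintf(stderr, "timeout waiting for shutdown reply\n");
	else
		printf("shutdown acknowledged\n");
	pthread_join(t, NULL);
	return 0;
}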
+/**
* _scsih_ir_shutdown - IR shutdown notification
* @ioc: per adapter object
*
* Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
* the host system is shutting down.
- *
- * Return nothing.
*/
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
@@ -7326,16 +11171,14 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
goto out;
}
ioc->scsih_cmds.status = MPT3_CMD_PENDING;
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
goto out;
}
@@ -7347,23 +11190,23 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
- pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ if (!ioc->hide_ir_msg)
+ ioc_info(ioc, "IR shutdown (sending)\n");
init_completion(&ioc->scsih_cmds.done);
- mpt3sas_base_put_smid_default(ioc, smid);
+ ioc->put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
goto out;
}
if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->scsih_cmds.reply;
- pr_info(MPT3SAS_FMT
- "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ if (!ioc->hide_ir_msg)
+ ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
out:
@@ -7372,23 +11215,62 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _scsih_remove - detach and remove add host
+ * _scsih_get_shost_and_ioc - get the shost and ioc pointers
+ * and verify that neither is NULL
+ * @pdev: PCI device struct
+ * @shost: address of scsi host pointer
+ * @ioc: address of HBA adapter pointer
+ *
+ * Return zero if *shost and *ioc are not NULL, otherwise return an error number.
+ */
+static int
+_scsih_get_shost_and_ioc(struct pci_dev *pdev,
+ struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
+{
+ *shost = pci_get_drvdata(pdev);
+ if (*shost == NULL) {
+ dev_err(&pdev->dev, "pdev's driver data is null\n");
+ return -ENXIO;
+ }
+
+ *ioc = shost_priv(*shost);
+ if (*ioc == NULL) {
+ dev_err(&pdev->dev, "shost's private data is null\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/**
+ * scsih_remove - detach and remove the added host
* @pdev: PCI device struct
*
* Routine called when unloading the driver.
- * Return nothing.
*/
-static void _scsih_remove(struct pci_dev *pdev)
+static void scsih_remove(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct _sas_port *mpt3sas_port, *next_port;
struct _raid_device *raid_device, *next;
struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _pcie_device *pcie_device, *pcienext;
struct workqueue_struct *wq;
unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ struct hba_port *port, *port_next;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
ioc->remove_host = 1;
+
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
+ _scsih_flush_running_cmds(ioc);
+ }
+
_scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@ -7397,9 +11279,17 @@ static void _scsih_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (wq)
destroy_workqueue(wq);
-
+ /*
+ * Copy back the unmodified ioc page1, so that on the next driver load
+ * the current modifications to ioc page1 won't take effect.
+ */
+ if (ioc->is_aero_ioc)
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
+ &ioc->ioc_pg1_copy);
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ mpt3sas_destroy_debugfs(ioc);
+ sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
if (raid_device->starget) {
@@ -7408,11 +11298,16 @@ static void _scsih_remove(struct pci_dev *pdev)
sas_target_priv_data->deleted = 1;
scsi_remove_target(&raid_device->starget->dev);
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
+ list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
+ list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
/* free ports attached to the sas_host */
list_for_each_entry_safe(mpt3sas_port, next_port,
@@ -7420,13 +11315,21 @@ static void _scsih_remove(struct pci_dev *pdev)
if (mpt3sas_port->remote_identify.device_type ==
SAS_END_DEVICE)
mpt3sas_device_remove_by_sas_address(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
else if (mpt3sas_port->remote_identify.device_type ==
SAS_EDGE_EXPANDER_DEVICE ||
mpt3sas_port->remote_identify.device_type ==
SAS_FANOUT_EXPANDER_DEVICE)
mpt3sas_expander_remove(ioc,
- mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_port->hba_port);
+ }
+
+ list_for_each_entry_safe(port, port_next,
+ &ioc->port_table_list, list) {
+ list_del(&port->list);
+ kfree(port);
}
/* free phys attached to the sas_host */
@@ -7436,28 +11339,37 @@ static void _scsih_remove(struct pci_dev *pdev)
ioc->sas_hba.num_phys = 0;
}
- sas_remove_host(shost);
mpt3sas_base_detach(ioc);
+ mpt3sas_ctl_release(ioc);
+ spin_lock(&gioc_lock);
list_del(&ioc->list);
- scsi_remove_host(shost);
+ spin_unlock(&gioc_lock);
scsi_host_put(shost);
}
/**
- * _scsih_shutdown - routine call during system shutdown
+ * scsih_shutdown - routine called during system shutdown
* @pdev: PCI device struct
- *
- * Return nothing.
*/
static void
-_scsih_shutdown(struct pci_dev *pdev)
+scsih_shutdown(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct workqueue_struct *wq;
unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
ioc->remove_host = 1;
+
+ if (!pci_device_is_present(pdev)) {
+ mpt3sas_base_pause_mq_polling(ioc);
+ _scsih_flush_running_cmds(ioc);
+ }
+
_scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@ -7466,9 +11378,23 @@ _scsih_shutdown(struct pci_dev *pdev)
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (wq)
destroy_workqueue(wq);
+ /*
+ * Copy back the unmodified ioc page1, so that on the next driver load
+ * the current modifications to ioc page1 won't take effect.
+ */
+ if (ioc->is_aero_ioc)
+ mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
+ &ioc->ioc_pg1_copy);
_scsih_ir_shutdown(ioc);
- mpt3sas_base_detach(ioc);
+ _scsih_nvme_shutdown(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_stop_watchdog(ioc);
+ ioc->shost_recovery = 1;
+ mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ mpt3sas_base_free_irq(ioc);
+ mpt3sas_base_disable_msix(ioc);
}
@@ -7483,59 +11409,98 @@ _scsih_shutdown(struct pci_dev *pdev)
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
- u8 is_raid;
+ u32 channel;
void *device;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
u16 handle;
u64 sas_address_parent;
u64 sas_address;
unsigned long flags;
int rc;
+ int tid;
+ struct hba_port *port;
/* no Bios, return immediately */
if (!ioc->bios_pg3.BiosVersion)
return;
device = NULL;
- is_raid = 0;
if (ioc->req_boot_device.device) {
device = ioc->req_boot_device.device;
- is_raid = ioc->req_boot_device.is_raid;
+ channel = ioc->req_boot_device.channel;
} else if (ioc->req_alt_boot_device.device) {
device = ioc->req_alt_boot_device.device;
- is_raid = ioc->req_alt_boot_device.is_raid;
+ channel = ioc->req_alt_boot_device.channel;
} else if (ioc->current_boot_device.device) {
device = ioc->current_boot_device.device;
- is_raid = ioc->current_boot_device.is_raid;
+ channel = ioc->current_boot_device.channel;
}
if (!device)
return;
- if (is_raid) {
+ if (channel == RAID_CHANNEL) {
raid_device = device;
+ /*
+ * If this boot VD is already registered with SML, there is no
+ * need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (raid_device->starget)
+ return;
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
+ } else if (channel == PCIE_CHANNEL) {
+ pcie_device = device;
+ /*
+ * If this boot NVMe device is already registered with SML, there is
+ * no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (pcie_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ tid = pcie_device->id;
+ list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
+ if (rc)
+ _scsih_pcie_device_remove(ioc, pcie_device);
} else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
+ /*
+ * If this boot sas/sata device is already registered with SML,
+ * there is no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (sas_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
handle = sas_device->handle;
sas_address_parent = sas_device->sas_address_parent;
sas_address = sas_device->sas_address;
+ port = sas_device->port;
list_move_tail(&sas_device->list, &ioc->sas_device_list);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (ioc->hide_drives)
+ return;
+
+ if (!port)
+ return;
+
if (!mpt3sas_transport_port_add(ioc, handle,
- sas_address_parent)) {
+ sas_address_parent, port)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_address,
- sas_address_parent);
+ sas_address_parent, port);
_scsih_sas_device_remove(ioc, sas_device);
}
}
@@ -7565,6 +11530,48 @@ _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
}
}
+static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ if (!list_empty(&ioc->sas_device_init_list)) {
+ sas_device = list_first_entry(&ioc->sas_device_init_list,
+ struct _sas_device, list);
+ sas_device_get(sas_device);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return sas_device;
+}
+
+static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+ /*
+ * Since we dropped the lock during the call to port_add(), we need to
+ * be careful here that somebody else didn't move or delete this item
+ * while we were busy with other things.
+ *
+ * If it was on the list, we need a put() for the reference the list
+ * had. Either way, we need a get() for the destination list.
+ */
+ if (!list_empty(&sas_device->list)) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
+ sas_device_get(sas_device);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
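The get()/put() pairing above follows the usual rule that each list owns one reference to the object it links, and that the caller must hold its own reference while moving the object. A minimal, self-contained sketch of the same pattern, assuming a plain kref-backed object (struct item, item_get/item_put and item_move are hypothetical names, not driver API):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct kref refcount;
	struct list_head list;
};

static void item_release(struct kref *kr)
{
	kfree(container_of(kr, struct item, refcount));
}

static void item_get(struct item *it) { kref_get(&it->refcount); }
static void item_put(struct item *it) { kref_put(&it->refcount, item_release); }

/* Caller must hold its own reference, so dropping the old list's
 * reference cannot free the object mid-move.
 */
static void item_move(struct item *it, struct list_head *dst, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(&it->list)) {
		list_del_init(&it->list);
		item_put(it);		/* reference held by the old list */
	}
	item_get(it);			/* reference for the destination list */
	list_add_tail(&it->list, dst);
	spin_unlock_irqrestore(lock, flags);
}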
+
/**
* _scsih_probe_sas - reporting sas devices to sas transport
* @ioc: per adapter object
@@ -7574,17 +11581,16 @@ _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
- struct _sas_device *sas_device, *next;
- unsigned long flags;
+ struct _sas_device *sas_device;
- /* SAS Device List */
- list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
- list) {
+ if (ioc->hide_drives)
+ return;
+ while ((sas_device = get_next_sas_device(ioc))) {
if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent)) {
- list_del(&sas_device->list);
- kfree(sas_device);
+ sas_device->sas_address_parent, sas_device->port)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ sas_device_put(sas_device);
continue;
} else if (!sas_device->starget) {
/*
@@ -7596,16 +11602,116 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_driver_loading) {
mpt3sas_transport_port_remove(ioc,
sas_device->sas_address,
- sas_device->sas_address_parent);
- list_del(&sas_device->list);
- kfree(sas_device);
+ sas_device->sas_address_parent,
+ sas_device->port);
+ _scsih_sas_device_remove(ioc, sas_device);
+ sas_device_put(sas_device);
continue;
}
}
+ sas_device_make_active(ioc, sas_device);
+ sas_device_put(sas_device);
+ }
+}
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_move_tail(&sas_device->list, &ioc->sas_device_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+/**
+ * get_next_pcie_device - Get the next pcie device
+ * @ioc: per adapter object
+ *
+ * Get the next pcie device from the pcie_device_init_list.
+ *
+ * Return: a pcie device structure if the pcie_device_init_list is not
+ * empty, otherwise NULL.
+ */
+static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ if (!list_empty(&ioc->pcie_device_init_list)) {
+ pcie_device = list_first_entry(&ioc->pcie_device_init_list,
+ struct _pcie_device, list);
+ pcie_device_get(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+/**
+ * pcie_device_make_active - Add pcie device to pcie_device_list list
+ * @ioc: per adapter object
+ * @pcie_device: pcie device object
+ *
+ * Add the pcie device, which has been registered with the SCSI Transport
+ * Layer, to the pcie_device_list.
+ */
+static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ int rc;
+
+ /* PCIe Device List */
+ while ((pcie_device = get_next_pcie_device(ioc))) {
+ if (pcie_device->starget) {
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ if (pcie_device->access_status ==
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+ pcie_device_make_active(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
+ pcie_device->id, 0);
+ if (rc) {
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ } else if (!pcie_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading) {
+ * TODO: determine whether this condition can
+ * actually occur.
+ */
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ }
+ pcie_device_make_active(ioc, pcie_device);
+ pcie_device_put(pcie_device);
}
}
@@ -7637,12 +11743,14 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_probe_sas(ioc);
_scsih_probe_raid(ioc);
}
- } else
+ } else {
_scsih_probe_sas(ioc);
+ _scsih_probe_pcie(ioc);
+ }
}
/**
- * _scsih_scan_start - scsi lld callback for .scan_start
+ * scsih_scan_start - scsi lld callback for .scan_start
* @shost: SCSI host pointer
*
* The shost has the ability to discover targets on its own instead
@@ -7650,12 +11758,14 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
* firmware discovery.
*/
static void
-_scsih_scan_start(struct Scsi_Host *shost)
+scsih_scan_start(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
int rc;
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+ mpt3sas_enable_diag_buffer(ioc, 1);
if (disable_discovery > 0)
return;
@@ -7664,11 +11774,30 @@ _scsih_scan_start(struct Scsi_Host *shost)
rc = mpt3sas_port_enable(ioc);
if (rc != 0)
- pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+ ioc_info(ioc, "port enable: FAILED\n");
+}
+
+/**
+ * _scsih_complete_devices_scanning - add the devices to sml and
+ * complete ioc initialization.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
+{
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
}
/**
- * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * scsih_scan_finished - scsi lld callback for .scan_finished
* @shost: SCSI host pointer
* @time: elapsed time of the scan in jiffies
*
@@ -7677,9 +11806,11 @@ _scsih_scan_start(struct Scsi_Host *shost)
* we wait for firmware discovery to complete, then return 1.
*/
static int
-_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 ioc_state;
+ int issue_hard_reset = 0;
if (disable_discovery > 0) {
ioc->is_driver_loading = 0;
@@ -7688,65 +11819,409 @@ _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
if (time >= (300 * HZ)) {
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with timeout (timeout=300s)\n",
- ioc->name);
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
ioc->is_driver_loading = 0;
return 1;
}
- if (ioc->start_scan)
+ if (ioc->start_scan) {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ issue_hard_reset = 1;
+ goto out;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_base_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ issue_hard_reset = 1;
+ goto out;
+ }
return 0;
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
+ ioc_info(ioc,
+ "port enable: aborted due to diag reset\n");
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
if (ioc->start_scan_failed) {
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with (ioc_status=0x%08x)\n",
- ioc->name, ioc->start_scan_failed);
+ ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->start_scan_failed);
ioc->is_driver_loading = 0;
ioc->wait_for_discovery_to_complete = 0;
ioc->remove_host = 1;
return 1;
}
- pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
- ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ ioc_info(ioc, "port enable: SUCCESS\n");
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ _scsih_complete_devices_scanning(ioc);
- if (ioc->wait_for_discovery_to_complete) {
- ioc->wait_for_discovery_to_complete = 0;
- _scsih_probe_devices(ioc);
+out:
+ if (issue_hard_reset) {
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
+ ioc->is_driver_loading = 0;
}
- mpt3sas_base_start_watchdog(ioc);
- ioc->is_driver_loading = 0;
return 1;
}
/**
+ * scsih_map_queues - map reply queues with request queues
+ * @shost: SCSI host pointer
+ */
+static void scsih_map_queues(struct Scsi_Host *shost)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ (struct MPT3SAS_ADAPTER *)shost->hostdata;
+ struct blk_mq_queue_map *map;
+ int i, qoff, offset;
+ int nr_msix_vectors = ioc->iopoll_q_start_index;
+ int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
+
+ if (shost->nr_hw_queues == 1)
+ return;
+
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ map = &shost->tag_set.map[i];
+ map->nr_queues = 0;
+ offset = 0;
+ if (i == HCTX_TYPE_DEFAULT) {
+ map->nr_queues =
+ nr_msix_vectors - ioc->high_iops_queues;
+ offset = ioc->high_iops_queues;
+ } else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = iopoll_q_count;
+
+ if (!map->nr_queues)
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+
+ /*
+ * The poll queue(s) don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq CPU mapping.
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ }
+}
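To make the partitioning above concrete, here is a worked example with made-up numbers; the real values come from the adapter's MSI-X and poll-queue setup, so treat this purely as an illustration:

/*
 * Hypothetical example (values are illustrative only):
 *
 *   ioc->reply_queue_count    = 16
 *   ioc->high_iops_queues     =  8
 *   ioc->iopoll_q_start_index = 12   =>  nr_msix_vectors = 12
 *                                        iopoll_q_count  =  4
 *
 *   HCTX_TYPE_DEFAULT: nr_queues = 12 - 8 = 4, queue_offset = 0,
 *                      mapped via blk_mq_map_hw_queues() starting at
 *                      MSI-X offset 8 (skipping the high-iops vectors)
 *   HCTX_TYPE_READ:    nr_queues = 0 (unused here)
 *   HCTX_TYPE_POLL:    nr_queues = 4, queue_offset = 4,
 *                      mapped via blk_mq_map_queues() (no IRQ affinity)
 */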
+
+/* shost template for SAS 2.0 HBA devices */
+static const struct scsi_host_template mpt2sas_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT2SAS_DRIVER_NAME,
+ .queuecommand = scsih_qcmd,
+ .target_alloc = scsih_target_alloc,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
+ .target_destroy = scsih_target_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
+ .scan_finished = scsih_scan_finished,
+ .scan_start = scsih_scan_start,
+ .change_queue_depth = scsih_change_queue_depth,
+ .eh_abort_handler = scsih_abort,
+ .eh_device_reset_handler = scsih_dev_reset,
+ .eh_target_reset_handler = scsih_target_reset,
+ .eh_host_reset_handler = scsih_host_reset,
+ .bios_param = scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT2SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .cmd_per_lun = 7,
+ .shost_groups = mpt3sas_host_groups,
+ .sdev_groups = mpt3sas_dev_groups,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
+};
+
+/* raid transport support for SAS 2.0 HBA devices */
+static struct raid_function_template mpt2sas_raid_functions = {
+ .cookie = &mpt2sas_driver_template,
+ .is_raid = scsih_is_raid,
+ .get_resync = scsih_get_resync,
+ .get_state = scsih_get_state,
+};
+
+/* shost template for SAS 3.0 HBA devices */
+static const struct scsi_host_template mpt3sas_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT3SAS_DRIVER_NAME,
+ .queuecommand = scsih_qcmd,
+ .target_alloc = scsih_target_alloc,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
+ .target_destroy = scsih_target_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
+ .scan_finished = scsih_scan_finished,
+ .scan_start = scsih_scan_start,
+ .change_queue_depth = scsih_change_queue_depth,
+ .eh_abort_handler = scsih_abort,
+ .eh_device_reset_handler = scsih_dev_reset,
+ .eh_target_reset_handler = scsih_target_reset,
+ .eh_host_reset_handler = scsih_host_reset,
+ .bios_param = scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT3SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .max_segment_size = 0xffffffff,
+ .cmd_per_lun = 128,
+ .shost_groups = mpt3sas_host_groups,
+ .sdev_groups = mpt3sas_dev_groups,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
+ .map_queues = scsih_map_queues,
+ .mq_poll = mpt3sas_blk_mq_poll,
+};
+
+/* raid transport support for SAS 3.0 HBA devices */
+static struct raid_function_template mpt3sas_raid_functions = {
+ .cookie = &mpt3sas_driver_template,
+ .is_raid = scsih_is_raid,
+ .get_resync = scsih_get_resync,
+ .get_state = scsih_get_state,
+};
+
+/**
+ * _scsih_determine_hba_mpi_version - determine in which MPI version class
+ * this device belongs to.
+ * @pdev: PCI device struct
+ *
+ * Return: MPI2_VERSION for SAS 2.0 HBA devices,
+ * MPI25_VERSION for SAS 3.0 HBA devices, and
+ * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices.
+ */
+static u16
+_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
+{
+
+ switch (pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SSS6200:
+ case MPI2_MFGPAGE_DEVID_SAS2004:
+ case MPI2_MFGPAGE_DEVID_SAS2008:
+ case MPI2_MFGPAGE_DEVID_SAS2108_1:
+ case MPI2_MFGPAGE_DEVID_SAS2108_2:
+ case MPI2_MFGPAGE_DEVID_SAS2108_3:
+ case MPI2_MFGPAGE_DEVID_SAS2116_1:
+ case MPI2_MFGPAGE_DEVID_SAS2116_2:
+ case MPI2_MFGPAGE_DEVID_SAS2208_1:
+ case MPI2_MFGPAGE_DEVID_SAS2208_2:
+ case MPI2_MFGPAGE_DEVID_SAS2208_3:
+ case MPI2_MFGPAGE_DEVID_SAS2208_4:
+ case MPI2_MFGPAGE_DEVID_SAS2208_5:
+ case MPI2_MFGPAGE_DEVID_SAS2208_6:
+ case MPI2_MFGPAGE_DEVID_SAS2308_1:
+ case MPI2_MFGPAGE_DEVID_SAS2308_2:
+ case MPI2_MFGPAGE_DEVID_SAS2308_3:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
+ return MPI2_VERSION;
+ case MPI25_MFGPAGE_DEVID_SAS3004:
+ case MPI25_MFGPAGE_DEVID_SAS3008:
+ case MPI25_MFGPAGE_DEVID_SAS3108_1:
+ case MPI25_MFGPAGE_DEVID_SAS3108_2:
+ case MPI25_MFGPAGE_DEVID_SAS3108_5:
+ case MPI25_MFGPAGE_DEVID_SAS3108_6:
+ return MPI25_VERSION;
+ case MPI26_MFGPAGE_DEVID_SAS3216:
+ case MPI26_MFGPAGE_DEVID_SAS3224:
+ case MPI26_MFGPAGE_DEVID_SAS3316_1:
+ case MPI26_MFGPAGE_DEVID_SAS3316_2:
+ case MPI26_MFGPAGE_DEVID_SAS3316_3:
+ case MPI26_MFGPAGE_DEVID_SAS3316_4:
+ case MPI26_MFGPAGE_DEVID_SAS3324_1:
+ case MPI26_MFGPAGE_DEVID_SAS3324_2:
+ case MPI26_MFGPAGE_DEVID_SAS3324_3:
+ case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
+ case MPI26_MFGPAGE_DEVID_SAS3616:
+ case MPI26_ATLAS_PCIe_SWITCH_DEVID:
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
+ return MPI26_VERSION;
+ }
+ return 0;
+}
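For reference, a minimal sketch of how the returned version gates enumeration; this simply mirrors the hbas_to_enumerate checks at the top of _scsih_probe() below (assuming the module parameter keeps its documented meaning: 1 enumerates SAS 2.0 HBAs only, 2 enumerates SAS 3.0 and newer only, anything else enumerates both):

	u16 ver = _scsih_determine_hba_mpi_version(pdev);

	if (ver == 0)
		return -ENODEV;		/* not an MPT2/MPT3 SAS device */
	if (hbas_to_enumerate == 1 && ver != MPI2_VERSION)
		return -ENODEV;		/* SAS 2.0 HBAs only */
	if (hbas_to_enumerate == 2 && ver == MPI2_VERSION)
		return -ENODEV;		/* SAS 3.0 and newer HBAs only */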
+
+/**
* _scsih_probe - attach and add scsi host
* @pdev: PCI device struct
* @id: pci device id
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct MPT3SAS_ADAPTER *ioc;
- struct Scsi_Host *shost;
+ struct Scsi_Host *shost = NULL;
+ int rv;
+ u16 hba_mpi_version;
+ int iopoll_q_count = 0;
+
+ /* Determine in which MPI version class this pci device belongs */
+ hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
+ if (hba_mpi_version == 0)
+ return -ENODEV;
- shost = scsi_host_alloc(&scsih_driver_template,
- sizeof(struct MPT3SAS_ADAPTER));
- if (!shost)
+ /* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one,
+ * for other generation HBAs return with -ENODEV
+ */
+ if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
return -ENODEV;
- /* init local params */
- ioc = shost_priv(shost);
- memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ /* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two,
+ * for other generation HBAs return with -ENODEV
+ */
+ if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
+ || hba_mpi_version == MPI26_VERSION)))
+ return -ENODEV;
+
+ switch (hba_mpi_version) {
+ case MPI2_VERSION:
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+ /* Use the mpt2sas driver host template for SAS 2.0 HBAs */
+ shost = scsi_host_alloc(&mpt2sas_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ ioc->hba_mpi_version_belonged = hba_mpi_version;
+ ioc->id = mpt2_ids++;
+ sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
+ switch (pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SSS6200:
+ ioc->is_warpdrive = 1;
+ ioc->hide_ir_msg = 1;
+ break;
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
+ case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
+ ioc->is_mcpu_endpoint = 1;
+ break;
+ default:
+ ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
+ break;
+ }
+
+ if (multipath_on_hba == -1 || multipath_on_hba == 0)
+ ioc->multipath_on_hba = 0;
+ else
+ ioc->multipath_on_hba = 1;
+
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
+ /* Use the mpt3sas driver host template for SAS 3.0 HBAs */
+ shost = scsi_host_alloc(&mpt3sas_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ ioc->hba_mpi_version_belonged = hba_mpi_version;
+ ioc->id = mpt3_ids++;
+ sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+ switch (pdev->device) {
+ case MPI26_MFGPAGE_DEVID_SAS3508:
+ case MPI26_MFGPAGE_DEVID_SAS3508_1:
+ case MPI26_MFGPAGE_DEVID_SAS3408:
+ case MPI26_MFGPAGE_DEVID_SAS3516:
+ case MPI26_MFGPAGE_DEVID_SAS3516_1:
+ case MPI26_MFGPAGE_DEVID_SAS3416:
+ case MPI26_MFGPAGE_DEVID_SAS3616:
+ case MPI26_ATLAS_PCIe_SWITCH_DEVID:
+ ioc->is_gen35_ioc = 1;
+ break;
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
+ dev_info(&pdev->dev,
+ "HBA is in Configurable Secure mode\n");
+ fallthrough;
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
+ ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
+ break;
+ default:
+ ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
+ }
+ if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
+ pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+ ioc->combined_reply_queue = 1;
+ if (ioc->is_gen35_ioc)
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+ else
+ ioc->combined_reply_index_count =
+ MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+ }
+
+ switch (ioc->is_gen35_ioc) {
+ case 0:
+ if (multipath_on_hba == -1 || multipath_on_hba == 0)
+ ioc->multipath_on_hba = 0;
+ else
+ ioc->multipath_on_hba = 1;
+ break;
+ case 1:
+ if (multipath_on_hba == -1 || multipath_on_hba > 0)
+ ioc->multipath_on_hba = 1;
+ else
+ ioc->multipath_on_hba = 0;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&ioc->list);
+ spin_lock(&gioc_lock);
list_add_tail(&ioc->list, &mpt3sas_ioc_list);
+ spin_unlock(&gioc_lock);
ioc->shost = shost;
- ioc->id = mpt_ids++;
- sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id);
ioc->pdev = pdev;
ioc->scsi_io_cb_idx = scsi_io_cb_idx;
ioc->tm_cb_idx = tm_cb_idx;
@@ -7761,24 +12236,48 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* Host waits for a minimum of six seconds */
+ ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+ /*
+ * Enable MEMORY MOVE support flag.
+ */
+ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
+ /* Enable ADDITIONAL QUERY support flag. */
+ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
+
+ ioc->enable_sdev_max_qd = enable_sdev_max_qd;
+
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
+ mutex_init(&ioc->hostdiag_unlock_mutex);
+ /* initializing pci_access_mutex lock */
+ mutex_init(&ioc->pci_access_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
spin_lock_init(&ioc->scsi_lookup_lock);
spin_lock_init(&ioc->sas_device_lock);
spin_lock_init(&ioc->sas_node_lock);
spin_lock_init(&ioc->fw_event_lock);
spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->pcie_device_lock);
spin_lock_init(&ioc->diag_trigger_lock);
INIT_LIST_HEAD(&ioc->sas_device_list);
INIT_LIST_HEAD(&ioc->sas_device_init_list);
INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->enclosure_list);
+ INIT_LIST_HEAD(&ioc->pcie_device_list);
+ INIT_LIST_HEAD(&ioc->pcie_device_init_list);
INIT_LIST_HEAD(&ioc->fw_event_list);
INIT_LIST_HEAD(&ioc->raid_device_list);
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_sc_list);
+ INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+ INIT_LIST_HEAD(&ioc->port_table_list);
+
+ sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
/* init shost parameters */
shost->max_cmd_len = 32;
@@ -7786,36 +12285,31 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->transportt = mpt3sas_transport_template;
shost->unique_id = ioc->id;
- if (max_sectors != 0xFFFF) {
- if (max_sectors < 64) {
- shost->max_sectors = 64;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. Assigning "
- "value of 64.\n", ioc->name, max_sectors);
- } else if (max_sectors > 32767) {
- shost->max_sectors = 32767;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. Assigning "
- "default value of 32767.\n", ioc->name,
- max_sectors);
- } else {
- shost->max_sectors = max_sectors & 0xFFFE;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ if (ioc->is_mcpu_endpoint) {
+ /* mCPU MPI support 64K max IO */
+ shost->max_sectors = 128;
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
+ } else {
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
+ max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
+ max_sectors);
+ } else {
+ shost->max_sectors = max_sectors & 0xFFFE;
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
+ }
}
}
-
- if ((scsi_add_host(shost, &pdev->dev))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- list_del(&ioc->list);
- goto out_add_shost_fail;
- }
-
/* register EEDP capabilities with SCSI layer */
- if (prot_mask > 0)
- scsi_host_set_prot(shost, prot_mask);
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, (prot_mask & 0x07));
else
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -7824,115 +12318,160 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* event thread */
- snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
- "fw_event%d", ioc->id);
- ioc->firmware_event_thread = create_singlethread_workqueue(
- ioc->firmware_event_name);
+ ioc->firmware_event_thread = alloc_ordered_workqueue(
+ "fw_event_%s%d", 0, ioc->driver_name, ioc->id);
if (!ioc->firmware_event_thread) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
goto out_thread_fail;
}
+ shost->host_tagset = 0;
+
+ if (ioc->is_gen35_ioc && host_tagset_enable)
+ shost->host_tagset = 1;
+
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rv = -ENODEV;
goto out_attach_fail;
}
+
+ if (ioc->is_warpdrive) {
+ if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
+ ioc->hide_drives = 0;
+ else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
+ ioc->hide_drives = 1;
+ else {
+ if (mpt3sas_get_num_volumes(ioc))
+ ioc->hide_drives = 1;
+ else
+ ioc->hide_drives = 0;
+ }
+ } else
+ ioc->hide_drives = 0;
+
+ shost->nr_hw_queues = 1;
+
+ if (shost->host_tagset) {
+ shost->nr_hw_queues =
+ ioc->reply_queue_count - ioc->high_iops_queues;
+
+ iopoll_q_count =
+ ioc->reply_queue_count - ioc->iopoll_q_start_index;
+
+ shost->nr_maps = iopoll_q_count ? 3 : 1;
+
+ dev_info(&ioc->pdev->dev,
+ "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
+ shost->can_queue, shost->nr_hw_queues);
+ }
+
+ rv = scsi_add_host(shost, &pdev->dev);
+ if (rv) {
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_add_shost_fail;
+ }
+
scsi_scan_host(shost);
+ mpt3sas_setup_debugfs(ioc);
return 0;
-
+out_add_shost_fail:
+ mpt3sas_base_detach(ioc);
out_attach_fail:
destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
+ spin_lock(&gioc_lock);
list_del(&ioc->list);
- scsi_remove_host(shost);
- out_add_shost_fail:
+ spin_unlock(&gioc_lock);
scsi_host_put(shost);
- return -ENODEV;
+ return rv;
}
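A brief note on the error unwind above, since the labels were reordered along with the late scsi_add_host() call: each label falls through to every later cleanup step, roughly as sketched here (summary only, not code to apply):

/*
 *   scsi_add_host() fails        -> mpt3sas_base_detach(), then ...
 *   mpt3sas_base_attach() fails  -> destroy_workqueue(firmware_event_thread), then ...
 *   workqueue allocation fails   -> list_del(&ioc->list) under gioc_lock
 *                                   and scsi_host_put(shost), return rv
 */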
-#ifdef CONFIG_PM
/**
- * _scsih_suspend - power management suspend main entry point
- * @pdev: PCI device struct
- * @state: PM state change to (usually PCI_D3)
+ * scsih_suspend - power management suspend main entry point
+ * @dev: Device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
-static int
-_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+scsih_suspend(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pci_power_t device_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ int rc;
+
+ rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (rc)
+ return rc;
mpt3sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
- device_state = pci_choose_state(pdev, state);
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ _scsih_nvme_shutdown(ioc);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
- pci_save_state(pdev);
mpt3sas_base_free_resources(ioc);
- pci_set_power_state(pdev, device_state);
return 0;
}
/**
- * _scsih_resume - power management resume main entry point
- * @pdev: PCI device struct
+ * scsih_resume - power management resume main entry point
+ * @dev: Device struct
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
-static int
-_scsih_resume(struct pci_dev *pdev)
+static int __maybe_unused
+scsih_resume(struct device *dev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state = pdev->current_state;
int r;
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (r)
+ return r;
+
+ ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
ioc->pdev = pdev;
r = mpt3sas_base_map_resources(ioc);
if (r)
return r;
-
- mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
+ mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
scsi_unblock_requests(shost);
mpt3sas_base_start_watchdog(ioc);
return 0;
}
-#endif /* CONFIG_PM */
/**
- * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * scsih_pci_error_detected - Called when a PCI error is detected.
* @pdev: PCI device struct
* @state: PCI channel state
*
* Description: Called when a PCI error is detected.
*
- * Return value:
- * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
*/
static pci_ers_result_t
-_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
- pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
- ioc->name, state);
+ ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
switch (state) {
case pci_channel_io_normal:
@@ -7948,6 +12487,7 @@ _scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* Permanent error, prepare for device removal */
ioc->pci_error_recovery = 1;
mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_pause_mq_polling(ioc);
_scsih_flush_running_cmds(ioc);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -7955,7 +12495,7 @@ _scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
}
/**
- * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * scsih_pci_slot_reset - Called when PCI slot has been reset.
* @pdev: PCI device struct
*
* Description: This routine is called by the pci error recovery
@@ -7963,14 +12503,16 @@ _scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
* should resume normal operations.
*/
static pci_ers_result_t
-_scsih_pci_slot_reset(struct pci_dev *pdev)
+scsih_pci_slot_reset(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
int rc;
- pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
- ioc->name);
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
ioc->pdev = pdev;
@@ -7979,11 +12521,11 @@ _scsih_pci_slot_reset(struct pci_dev *pdev)
if (rc)
return PCI_ERS_RESULT_DISCONNECT;
- rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
- FORCE_BIG_HAMMER);
+ ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
- (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "hard reset: %s\n",
+ (rc == 0) ? "success" : "failed");
if (!rc)
return PCI_ERS_RESULT_RECOVERED;
@@ -7992,7 +12534,7 @@ _scsih_pci_slot_reset(struct pci_dev *pdev)
}
/**
- * _scsih_pci_resume() - resume normal ops after PCI reset
+ * scsih_pci_resume() - resume normal ops after PCI reset
* @pdev: pointer to PCI device
*
* Called when the error recovery driver tells us that its
@@ -8000,99 +12542,228 @@ _scsih_pci_slot_reset(struct pci_dev *pdev)
* halted scsi ops to resume.
*/
static void
-_scsih_pci_resume(struct pci_dev *pdev)
+scsih_pci_resume(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
- pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+ ioc_info(ioc, "PCI error: resume callback!!\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
mpt3sas_base_start_watchdog(ioc);
scsi_unblock_requests(ioc->shost);
}
/**
- * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
* @pdev: pointer to PCI device
*/
static pci_ers_result_t
-_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
- pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
- ioc->name);
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
+ /* This is called only if scsih_pci_error_detected returns
+ * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
+ * works, no need to reset slot.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
}
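For context, a rough sketch of the PCI error-recovery flow these callbacks assume, per the kernel's generic AER recovery model (the pci_channel_io_frozen branch of scsih_pci_error_detected() lies outside this hunk):

/*
 *  channel state                error_detected() returns     subsequent callbacks
 *  ---------------------------  ---------------------------  --------------------------
 *  pci_channel_io_normal        PCI_ERS_RESULT_CAN_RECOVER   mmio_enabled() -> resume()
 *  pci_channel_io_frozen        PCI_ERS_RESULT_NEED_RESET    slot_reset()   -> resume()
 *  pci_channel_io_perm_failure  PCI_ERS_RESULT_DISCONNECT    device is removed
 */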
-/* raid transport support */
-static struct raid_function_template mpt3sas_raid_functions = {
- .cookie = &scsih_driver_template,
- .is_raid = _scsih_is_raid,
- .get_resync = _scsih_get_resync,
- .get_state = _scsih_get_state,
+/*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+static const struct pci_device_id mpt3sas_pci_table[] = {
+ /* Spitfire ~ 2004 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Falcon ~ 2008 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Liberator ~ 2108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Meteor ~ 2116 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Thunderbolt ~ 2208 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Mustang ~ 2308 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* SSS6200 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Fury ~ 3004 and 3008 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Invader ~ 3108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Cutlass ~ 3216 and 3224 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Intruder ~ 3316 and 3324 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Mercator ~ 3616*/
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /* Aero SI 0x00E1 Configurable Secure
+ * 0x00E2 Hard Secure
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
+ * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /* Atlas PCIe Switch Management Port */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /* Sea SI 0x00E5 Configurable Secure
+ * 0x00E6 Hard Secure
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
+ * ATTO Branded ExpressSAS H12xx GT
+ */
+ { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
+ * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ {0} /* Terminating entry */
};
+MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
-static struct pci_error_handlers _scsih_err_handler = {
- .error_detected = _scsih_pci_error_detected,
- .mmio_enabled = _scsih_pci_mmio_enabled,
- .slot_reset = _scsih_pci_slot_reset,
- .resume = _scsih_pci_resume,
+static const struct pci_error_handlers _mpt3sas_err_handler = {
+ .error_detected = scsih_pci_error_detected,
+ .mmio_enabled = scsih_pci_mmio_enabled,
+ .slot_reset = scsih_pci_slot_reset,
+ .resume = scsih_pci_resume,
};
-static struct pci_driver scsih_driver = {
+static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
+
+static struct pci_driver mpt3sas_driver = {
.name = MPT3SAS_DRIVER_NAME,
- .id_table = scsih_pci_table,
+ .id_table = mpt3sas_pci_table,
.probe = _scsih_probe,
- .remove = _scsih_remove,
- .shutdown = _scsih_shutdown,
- .err_handler = &_scsih_err_handler,
-#ifdef CONFIG_PM
- .suspend = _scsih_suspend,
- .resume = _scsih_resume,
-#endif
+ .remove = scsih_remove,
+ .shutdown = scsih_shutdown,
+ .err_handler = &_mpt3sas_err_handler,
+ .driver.pm = &scsih_pm_ops,
};
-
/**
- * _scsih_init - main entry point for this driver.
+ * scsih_init - main entry point for this driver.
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
-static int __init
-_scsih_init(void)
+static int
+scsih_init(void)
{
- int error;
-
- mpt_ids = 0;
-
- pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
- MPT3SAS_DRIVER_VERSION);
-
- mpt3sas_transport_template =
- sas_attach_transport(&mpt3sas_transport_functions);
- if (!mpt3sas_transport_template)
- return -ENODEV;
-
-/* raid transport support */
- mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions);
- if (!mpt3sas_raid_template) {
- sas_release_transport(mpt3sas_transport_template);
- return -ENODEV;
- }
+ mpt2_ids = 0;
+ mpt3_ids = 0;
mpt3sas_base_initialize_callback_handler();
/* queuecommand callback hander */
scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
- /* task managment callback handler */
+ /* task management callback handler */
tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
/* base internal commands callback handler */
@@ -8123,33 +12794,18 @@ _scsih_init(void)
tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
_scsih_sas_control_complete);
- mpt3sas_ctl_init();
-
- error = pci_register_driver(&scsih_driver);
- if (error) {
- /* raid transport support */
- raid_class_release(mpt3sas_raid_template);
- sas_release_transport(mpt3sas_transport_template);
- }
-
- return error;
+ mpt3sas_init_debugfs();
+ return 0;
}
/**
- * _scsih_exit - exit point for this driver (when it is a module).
+ * scsih_exit - exit point for this driver (when it is a module).
*
- * Returns 0 success, anything else error.
+ * Return: 0 success, anything else error.
*/
-static void __exit
-_scsih_exit(void)
+static void
+scsih_exit(void)
{
- pr_info("mpt3sas version %s unloading\n",
- MPT3SAS_DRIVER_VERSION);
-
- mpt3sas_ctl_exit();
-
- pci_unregister_driver(&scsih_driver);
-
mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
mpt3sas_base_release_callback_handler(tm_cb_idx);
@@ -8165,9 +12821,89 @@ _scsih_exit(void)
mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
/* raid transport support */
- raid_class_release(mpt3sas_raid_template);
+ if (hbas_to_enumerate != 1)
+ raid_class_release(mpt3sas_raid_template);
+ if (hbas_to_enumerate != 2)
+ raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt3sas_transport_template);
+ mpt3sas_exit_debugfs();
+}
+
+/**
+ * _mpt3sas_init - main entry point for this driver.
+ *
+ * Return: 0 success, anything else error.
+ */
+static int __init
+_mpt3sas_init(void)
+{
+ int error;
+
+ pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_transport_template =
+ sas_attach_transport(&mpt3sas_transport_functions);
+ if (!mpt3sas_transport_template)
+ return -ENODEV;
+
+ /* No need to attach the mpt3sas raid functions template
+ * if hbas_to_enumerate value is one.
+ */
+ if (hbas_to_enumerate != 1) {
+ mpt3sas_raid_template =
+ raid_class_attach(&mpt3sas_raid_functions);
+ if (!mpt3sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+ }
+
+ /* No need to attach the mpt2sas raid functions template
+ * if hbas_to_enumerate value is two.
+ */
+ if (hbas_to_enumerate != 2) {
+ mpt2sas_raid_template =
+ raid_class_attach(&mpt2sas_raid_functions);
+ if (!mpt2sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+ }
+
+ error = scsih_init();
+ if (error) {
+ scsih_exit();
+ return error;
+ }
+
+ mpt3sas_ctl_init(hbas_to_enumerate);
+
+ error = pci_register_driver(&mpt3sas_driver);
+ if (error) {
+ mpt3sas_ctl_exit(hbas_to_enumerate);
+ scsih_exit();
+ }
+
+ return error;
+}
+
+/**
+ * _mpt3sas_exit - exit point for this driver (when it is a module).
+ *
+ */
+static void __exit
+_mpt3sas_exit(void)
+{
+ pr_info("mpt3sas version %s unloading\n",
+ MPT3SAS_DRIVER_VERSION);
+
+ pci_unregister_driver(&mpt3sas_driver);
+
+ mpt3sas_ctl_exit(hbas_to_enumerate);
+
+ scsih_exit();
}
-module_init(_scsih_init);
-module_exit(_scsih_exit);
+module_init(_mpt3sas_init);
+module_exit(_mpt3sas_exit);