Diffstat (limited to 'drivers/scsi/mpi3mr/mpi3mr_os.c')
-rw-r--r--   drivers/scsi/mpi3mr/mpi3mr_os.c   2065
1 file changed, 1786 insertions(+), 279 deletions(-)
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 284117da9086..b88633e1efe2 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -2,18 +2,20 @@ /* * Driver for Broadcom MPI3 Storage Controllers * - * Copyright (C) 2017-2021 Broadcom Inc. + * Copyright (C) 2017-2023 Broadcom Inc. * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) * */ #include "mpi3mr.h" +#include <linux/idr.h> /* global driver scop variables */ LIST_HEAD(mrioc_list); DEFINE_SPINLOCK(mrioc_list_lock); -static int mrioc_ids; +static DEFINE_IDA(mrioc_ida); static int warn_non_secure_ctlr; +atomic64_t event_counter; MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR); MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC); @@ -32,11 +34,28 @@ static int logging_level; module_param(logging_level, int, 0); MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info (default=0)"); +static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; +module_param(max_sgl_entries, int, 0444); +MODULE_PARM_DESC(max_sgl_entries, + "Preferred max number of SG entries to be used for a single I/O\n" + "The actual value will be determined by the driver\n" + "(Minimum=256, Maximum=2048, default=256)"); /* Forward declarations*/ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx); +#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF) + +#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE) + +/* + * SAS Log info code for a NCQ collateral abort after an NCQ error: + * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR + * See: drivers/message/fusion/lsi/mpi_log_sas.h + */ +#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR 0x31080000 + /** * mpi3mr_host_tag_for_scmd - Get host tag for a scmd * @mrioc: Adapter instance reference @@ -230,6 +249,40 @@ static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, } /** + * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to + * the list + * @mrioc: Adapter instance reference + * @event_data: Event data + * + * Add the given hdb trigger data event to the firmware event + * list. + * + * Return: Nothing. + */ +void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc, + struct trigger_event_data *event_data) +{ + struct mpi3mr_fwevt *fwevt; + u16 sz = sizeof(*event_data); + + fwevt = mpi3mr_alloc_fwevt(sz); + if (!fwevt) { + ioc_warn(mrioc, "failed to queue hdb trigger data event\n"); + return; + } + + fwevt->mrioc = mrioc; + fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER; + fwevt->send_ack = 0; + fwevt->process_evt = 1; + fwevt->evt_ctx = 0; + fwevt->event_data_size = sz; + memcpy(fwevt->event_data, event_data, sz); + + mpi3mr_fwevt_add_to_list(mrioc, fwevt); +} + +/** * mpi3mr_fwevt_del_from_list - Delete firmware event from list * @mrioc: Adapter instance reference * @fwevt: Firmware event reference @@ -286,6 +339,35 @@ static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt( } /** + * mpi3mr_cancel_work - cancel firmware event + * @fwevt: fwevt object which needs to be canceled + * + * Return: Nothing. + */ +static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt) +{ + /* + * Wait on the fwevt to complete. If this returns 1, then + * the event was never executed. 
+ * + * If it did execute, we wait for it to finish, and the put will + * happen from mpi3mr_process_fwevt() + */ + if (cancel_work_sync(&fwevt->work)) { + /* + * Put fwevt reference count after + * dequeuing it from worker queue + */ + mpi3mr_fwevt_put(fwevt); + /* + * Put fwevt reference count to neutralize + * kref_init increment + */ + mpi3mr_fwevt_put(fwevt); + } +} + +/** * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list * @mrioc: Adapter instance reference * @@ -302,32 +384,73 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc) !mrioc->fwevt_worker_thread) return; - while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) || - (fwevt = mrioc->current_event)) { + while ((fwevt = mpi3mr_dequeue_fwevt(mrioc))) + mpi3mr_cancel_work(fwevt); + + if (mrioc->current_event) { + fwevt = mrioc->current_event; /* - * Wait on the fwevt to complete. If this returns 1, then - * the event was never executed, and we need a put for the - * reference the work had on the fwevt. - * - * If it did execute, we wait for it to finish, and the put will - * happen from mpi3mr_process_fwevt() + * Don't call cancel_work_sync() API for the + * fwevt work if the controller reset is + * get called as part of processing the + * same fwevt work (or) when worker thread is + * waiting for device add/remove APIs to complete. + * Otherwise we will see deadlock. */ - if (cancel_work_sync(&fwevt->work)) { - /* - * Put fwevt reference count after - * dequeuing it from worker queue - */ - mpi3mr_fwevt_put(fwevt); - /* - * Put fwevt reference count to neutralize - * kref_init increment - */ - mpi3mr_fwevt_put(fwevt); + if (current_work() == &fwevt->work || fwevt->pending_at_sml) { + fwevt->discard = 1; + return; } + + mpi3mr_cancel_work(fwevt); } } /** + * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event + * @mrioc: Adapter instance reference + * @tg: Throttle group information pointer + * + * Accessor to queue on synthetically generated driver event to + * the event worker thread, the driver event will be used to + * reduce the QD of all VDs in the TG from the worker thread. + * + * Return: None. 
+ */ +static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc, + struct mpi3mr_throttle_group_info *tg) +{ + struct mpi3mr_fwevt *fwevt; + u16 sz = sizeof(struct mpi3mr_throttle_group_info *); + + /* + * If the QD reduction event is already queued due to throttle and if + * the QD is not restored through device info change event + * then dont queue further reduction events + */ + if (tg->fw_qd != tg->modified_qd) + return; + + fwevt = mpi3mr_alloc_fwevt(sz); + if (!fwevt) { + ioc_warn(mrioc, "failed to queue TG QD reduction event\n"); + return; + } + *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg; + fwevt->mrioc = mrioc; + fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION; + fwevt->send_ack = 0; + fwevt->process_evt = 1; + fwevt->evt_ctx = 0; + fwevt->event_data_size = sz; + tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8); + + dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n", + tg->id); + mpi3mr_fwevt_add_to_list(mrioc, fwevt); +} + +/** * mpi3mr_invalidate_devhandles -Invalidate device handles * @mrioc: Adapter instance reference * @@ -346,6 +469,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc) if (tgtdev->starget && tgtdev->starget->hostdata) { tgt_priv = tgtdev->starget->hostdata; tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; + tgt_priv->io_throttle_enabled = 0; + tgt_priv->io_divert = 0; + tgt_priv->throttle_group = NULL; + tgt_priv->wslen = 0; + if (tgtdev->host_exposed) + atomic_set(&tgt_priv->block_io, 1); } } } @@ -354,14 +483,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc) * mpi3mr_print_scmd - print individual SCSI command * @rq: Block request * @data: Adapter instance reference - * @reserved: N/A. Currently not used * * Print the SCSI command details if it is in LLD scope. * * Return: true always. */ -static bool mpi3mr_print_scmd(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_print_scmd(struct request *rq, void *data) { struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); @@ -385,7 +512,6 @@ out: * mpi3mr_flush_scmd - Flush individual SCSI command * @rq: Block request * @data: Adapter instance reference - * @reserved: N/A. Currently not used * * Return the SCSI command to the upper layers if it is in LLD * scope. @@ -393,8 +519,7 @@ out: * Return: true always. */ -static bool mpi3mr_flush_scmd(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_flush_scmd(struct request *rq, void *data) { struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); @@ -424,7 +549,6 @@ out: * mpi3mr_count_dev_pending - Count commands pending for a lun * @rq: Block request * @data: SCSI device reference - * @reserved: Unused * * This is an iterator function called for each SCSI command in * a host and if the command is pending in the LLD for the @@ -434,8 +558,7 @@ out: * Return: true always. 
*/ -static bool mpi3mr_count_dev_pending(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_count_dev_pending(struct request *rq, void *data) { struct scsi_device *sdev = (struct scsi_device *)data; struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; @@ -458,7 +581,6 @@ out: * mpi3mr_count_tgt_pending - Count commands pending for target * @rq: Block request * @data: SCSI target reference - * @reserved: Unused * * This is an iterator function called for each SCSI command in * a host and if the command is pending in the LLD for the @@ -468,8 +590,7 @@ out: * Return: true always. */ -static bool mpi3mr_count_tgt_pending(struct request *rq, - void *data, bool reserved) +static bool mpi3mr_count_tgt_pending(struct request *rq, void *data) { struct scsi_target *starget = (struct scsi_target *)data; struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; @@ -511,6 +632,39 @@ void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc) } /** + * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds + * @mrioc: Adapter instance reference + * + * This function waits for currently running IO poll threads to + * exit and then flushes all host I/Os and any internal pending + * cmds. This is executed after controller is marked as + * unrecoverable. + * + * Return: Nothing. + */ +void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc) +{ + struct Scsi_Host *shost = mrioc->shost; + int i; + + if (!mrioc->unrecoverable) + return; + + if (mrioc->op_reply_qinfo) { + for (i = 0; i < mrioc->num_queues; i++) { + while (atomic_read(&mrioc->op_reply_qinfo[i].in_use)) + udelay(500); + atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); + } + } + mrioc->flush_io_count = 0; + blk_mq_tagset_busy_iter(&shost->tag_set, + mpi3mr_flush_scmd, (void *)mrioc); + mpi3mr_flush_delayed_cmd_lists(mrioc); + mpi3mr_flush_drv_cmds(mrioc); +} + +/** * mpi3mr_alloc_tgtdev - target device allocator * * Allocate target device instance and initialize the reference @@ -547,6 +701,7 @@ static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, mpi3mr_tgtdev_get(tgtdev); INIT_LIST_HEAD(&tgtdev->list); list_add_tail(&tgtdev->list, &mrioc->tgtdev_list); + tgtdev->state = MPI3MR_DEV_CREATED; spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); } @@ -554,20 +709,25 @@ static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list * @mrioc: Adapter instance reference * @tgtdev: Target device + * @must_delete: Must delete the target device from the list irrespective + * of the device state. * * Remove the target device from the target device list * * Return: Nothing. */ static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc, - struct mpi3mr_tgt_dev *tgtdev) + struct mpi3mr_tgt_dev *tgtdev, bool must_delete) { unsigned long flags; spin_lock_irqsave(&mrioc->tgtdev_lock, flags); - if (!list_empty(&tgtdev->list)) { - list_del_init(&tgtdev->list); - mpi3mr_tgtdev_put(tgtdev); + if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) { + if (!list_empty(&tgtdev->list)) { + list_del_init(&tgtdev->list); + tgtdev->state = MPI3MR_DEV_DELETED; + mpi3mr_tgtdev_put(tgtdev); + } } spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); } @@ -608,7 +768,7 @@ found_tgtdev: * * Return: Target device reference. 
*/ -static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( +struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( struct mpi3mr_ioc *mrioc, u16 handle) { struct mpi3mr_tgt_dev *tgtdev; @@ -691,6 +851,53 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( } /** + * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs + * @mrioc: Adapter instance reference + * @tg: Throttle group information pointer + * @divert_value: 1 or 0 + * + * Accessor to set io_divert flag for each device associated + * with the given throttle group with the given value. + * + * Return: None. + */ +static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, + struct mpi3mr_throttle_group_info *tg, u8 divert_value) +{ + unsigned long flags; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *tgt_priv; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if (tgtdev->starget && tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + if (tgt_priv->throttle_group == tg) + tgt_priv->io_divert = divert_value; + } + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); +} + +/** + * mpi3mr_print_device_event_notice - print notice related to post processing of + * device event after controller reset. + * + * @mrioc: Adapter instance reference + * @device_add: true for device add event and false for device removal event + * + * Return: None. + */ +void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, + bool device_add) +{ + ioc_notice(mrioc, "Device %s was in progress before the reset and\n", + (device_add ? "addition" : "removal")); + ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); + ioc_notice(mrioc, "are matched with attached devices for correctness\n"); +} + +/** * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers * @mrioc: Adapter instance reference * @tgtdev: Target device structure @@ -701,7 +908,7 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( * * Return: 0 on success, non zero on failure. 
*/ -static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, +void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, struct mpi3mr_tgt_dev *tgtdev) { struct mpi3mr_stgt_priv_data *tgt_priv; @@ -710,13 +917,31 @@ static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); if (tgtdev->starget && tgtdev->starget->hostdata) { tgt_priv = tgtdev->starget->hostdata; + atomic_set(&tgt_priv->block_io, 0); tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; } - if (tgtdev->starget) { - scsi_remove_target(&tgtdev->starget->dev); - tgtdev->host_exposed = 0; - } + if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != + MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { + if (tgtdev->starget) { + if (mrioc->current_event) + mrioc->current_event->pending_at_sml = 1; + scsi_remove_target(&tgtdev->starget->dev); + tgtdev->host_exposed = 0; + if (mrioc->current_event) { + mrioc->current_event->pending_at_sml = 0; + if (mrioc->current_event->discard) { + mpi3mr_print_device_event_notice(mrioc, + false); + return; + } + } + } + } else + mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); + mpi3mr_global_trigger(mrioc, + MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED); + ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); } @@ -738,23 +963,41 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, int retval = 0; struct mpi3mr_tgt_dev *tgtdev; + if (mrioc->reset_in_progress || mrioc->pci_err_recovery) + return -1; + tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); if (!tgtdev) { retval = -1; goto out; } - if (tgtdev->is_hidden) { + if (tgtdev->is_hidden || tgtdev->host_exposed) { retval = -1; goto out; } - if (!tgtdev->host_exposed && !mrioc->reset_in_progress) { + if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != + MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl){ tgtdev->host_exposed = 1; - scsi_scan_target(&mrioc->shost->shost_gendev, 0, - tgtdev->perst_id, + if (mrioc->current_event) + mrioc->current_event->pending_at_sml = 1; + scsi_scan_target(&mrioc->shost->shost_gendev, + mrioc->scsi_device_channel, tgtdev->perst_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); if (!tgtdev->starget) tgtdev->host_exposed = 0; - } + if (mrioc->current_event) { + mrioc->current_event->pending_at_sml = 0; + if (mrioc->current_event->discard) { + mpi3mr_print_device_event_notice(mrioc, true); + goto out; + } + } + dprint_event_bh(mrioc, + "exposed target device with handle(0x%04x), perst_id(%d)\n", + tgtdev->dev_handle, perst_id); + goto out; + } else + mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev); out: if (tgtdev) mpi3mr_tgtdev_put(tgtdev); @@ -785,10 +1028,30 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev, else if (!q_depth) q_depth = MPI3MR_DEFAULT_SDEV_QD; retval = scsi_change_queue_depth(sdev, q_depth); + sdev->max_queue_depth = sdev->queue_depth; return retval; } +static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev, + struct queue_limits *lim) +{ + u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? 
: MPI3MR_DEFAULT_PGSZEXP; + + lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512; + lim->virt_boundary_mask = (1 << pgsz) - 1; +} + +static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev, + struct queue_limits *lim) +{ + if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE && + (tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) + mpi3mr_configure_nvme_dev(tgt_dev, lim); +} + /** * mpi3mr_update_sdev - Update SCSI device information * @sdev: SCSI device reference @@ -804,35 +1067,21 @@ static void mpi3mr_update_sdev(struct scsi_device *sdev, void *data) { struct mpi3mr_tgt_dev *tgtdev; + struct queue_limits lim; tgtdev = (struct mpi3mr_tgt_dev *)data; if (!tgtdev) return; mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); - switch (tgtdev->dev_type) { - case MPI3_DEVICE_DEVFORM_PCIE: - /*The block layer hw sector size = 512*/ - if ((tgtdev->dev_spec.pcie_inf.dev_info & - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { - blk_queue_max_hw_sectors(sdev->request_queue, - tgtdev->dev_spec.pcie_inf.mdts / 512); - if (tgtdev->dev_spec.pcie_inf.pgsz == 0) - blk_queue_virt_boundary(sdev->request_queue, - ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); - else - blk_queue_virt_boundary(sdev->request_queue, - ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); - } - break; - default: - break; - } + + lim = queue_limits_start_update(sdev->request_queue); + mpi3mr_configure_tgt_dev(tgtdev, &lim); + WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim)); } /** - * mpi3mr_rfresh_tgtdevs - Refresh target device exposure + * mpi3mr_refresh_tgtdevs - Refresh target device exposure * @mrioc: Adapter instance reference * * This is executed post controller reset to identify any @@ -841,26 +1090,51 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data) * * Return: Nothing. 
*/ - -void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) +static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc) { struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; + struct mpi3mr_stgt_priv_data *tgt_priv; + dprint_reset(mrioc, "refresh target devices: check for removals\n"); list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, list) { - if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) && - tgtdev->host_exposed) { - mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); - mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); + if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) || + tgtdev->is_hidden) && + tgtdev->host_exposed && tgtdev->starget && + tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + tgt_priv->dev_removed = 1; + atomic_set(&tgt_priv->block_io, 0); + } + } + + list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, + list) { + if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) { + dprint_reset(mrioc, "removing target device with perst_id(%d)\n", + tgtdev->perst_id); + if (tgtdev->host_exposed) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); mpi3mr_tgtdev_put(tgtdev); + } else if (tgtdev->is_hidden & tgtdev->host_exposed) { + dprint_reset(mrioc, "hiding target device with perst_id(%d)\n", + tgtdev->perst_id); + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); } } tgtdev = NULL; list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && - !tgtdev->is_hidden && !tgtdev->host_exposed) - mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); + !tgtdev->is_hidden) { + if (!tgtdev->host_exposed) + mpi3mr_report_tgtdev_to_host(mrioc, + tgtdev->perst_id); + else if (tgtdev->starget) + starget_for_each_device(tgtdev->starget, + (void *)tgtdev, mpi3mr_update_sdev); + } } } @@ -869,6 +1143,7 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) * @mrioc: Adapter instance reference * @tgtdev: Target device internal structure * @dev_pg0: New device page0 + * @is_added: Flag to indicate the device is just added * * Update the information from the device page0 into the driver * cached target device structure. @@ -876,30 +1151,64 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) * Return: Nothing. 
*/ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, - struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0) + struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0, + bool is_added) { u16 flags = 0; - struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; + struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; + struct mpi3mr_enclosure_node *enclosure_dev = NULL; u8 prot_mask = 0; tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); tgtdev->dev_type = dev_pg0->device_form; + tgtdev->io_unit_port = dev_pg0->io_unit_port; tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle); tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle); tgtdev->slot = le16_to_cpu(dev_pg0->slot); tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); tgtdev->wwid = le64_to_cpu(dev_pg0->wwid); + tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags); + + if (tgtdev->encl_handle) + enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, + tgtdev->encl_handle); + if (enclosure_dev) + tgtdev->enclosure_logical_id = le64_to_cpu( + enclosure_dev->pg0.enclosure_logical_id); + + flags = tgtdev->devpg0_flag; - flags = le16_to_cpu(dev_pg0->flags); tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN); + if (is_added == true) + tgtdev->io_throttle_enabled = + (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0; + + switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) { + case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB: + tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS; + break; + case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB: + tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS; + break; + case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT: + default: + tgtdev->wslen = 0; + break; + } + if (tgtdev->starget && tgtdev->starget->hostdata) { scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) tgtdev->starget->hostdata; scsi_tgt_priv_data->perst_id = tgtdev->perst_id; scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle; scsi_tgt_priv_data->dev_type = tgtdev->dev_type; + scsi_tgt_priv_data->io_throttle_enabled = + tgtdev->io_throttle_enabled; + if (is_added == true) + atomic_set(&scsi_tgt_priv_data->block_io, 0); + scsi_tgt_priv_data->wslen = tgtdev->wslen; } switch (dev_pg0->access_status) { @@ -923,12 +1232,25 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info; tgtdev->dev_spec.sas_sata_inf.sas_address = le64_to_cpu(sasinf->sas_address); + tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num; + tgtdev->dev_spec.sas_sata_inf.attached_phy_id = + sasinf->attached_phy_identifier; if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) tgtdev->is_hidden = 1; else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET | MPI3_SAS_DEVICE_INFO_SSP_TARGET))) tgtdev->is_hidden = 1; + + if (((tgtdev->devpg0_flag & + MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) + && (tgtdev->devpg0_flag & + MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) || + (tgtdev->parent_handle == 0xFFFF)) + tgtdev->non_stl = 1; + if (tgtdev->dev_spec.sas_sata_inf.hba_port) + tgtdev->dev_spec.sas_sata_inf.hba_port->port_id = + dev_pg0->io_unit_port; break; } case MPI3_DEVICE_DEVFORM_PCIE: @@ -961,6 +1283,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) tgtdev->is_hidden = 1; + tgtdev->non_stl = 1; if (!mrioc->shost) break; prot_mask = 
scsi_host_get_prot(mrioc->shost); @@ -977,10 +1300,39 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, { struct mpi3_device0_vd_format *vdinf = &dev_pg0->device_specific.vd_format; + struct mpi3mr_throttle_group_info *tg = NULL; + u16 vdinf_io_throttle_group = + le16_to_cpu(vdinf->io_throttle_group); - tgtdev->dev_spec.vol_inf.state = vdinf->vd_state; + tgtdev->dev_spec.vd_inf.state = vdinf->vd_state; if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE) tgtdev->is_hidden = 1; + tgtdev->non_stl = 1; + tgtdev->dev_spec.vd_inf.reset_to = + max_t(u8, vdinf->vd_reset_to, + MPI3MR_INTADMCMD_TIMEOUT); + tgtdev->dev_spec.vd_inf.abort_to = + max_t(u8, vdinf->vd_abort_to, + MPI3MR_INTADMCMD_TIMEOUT); + tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group; + tgtdev->dev_spec.vd_inf.tg_high = + le16_to_cpu(vdinf->io_throttle_group_high) * 2048; + tgtdev->dev_spec.vd_inf.tg_low = + le16_to_cpu(vdinf->io_throttle_group_low) * 2048; + if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) { + tg = mrioc->throttle_groups + vdinf_io_throttle_group; + tg->id = vdinf_io_throttle_group; + tg->high = tgtdev->dev_spec.vd_inf.tg_high; + tg->low = tgtdev->dev_spec.vd_inf.tg_low; + tg->qd_reduction = + tgtdev->dev_spec.vd_inf.tg_qd_reduction; + if (is_added == true) + tg->fw_qd = tgtdev->q_depth; + tg->modified_qd = tgtdev->q_depth; + } + tgtdev->dev_spec.vd_inf.tg = tg; + if (scsi_tgt_priv_data) + scsi_tgt_priv_data->throttle_group = tg; break; } default: @@ -1009,9 +1361,9 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, (struct mpi3_event_data_device_status_change *)fwevt->event_data; dev_handle = le16_to_cpu(evtdata->dev_handle); - ioc_info(mrioc, - "%s :device status change: handle(0x%04x): reason code(0x%x)\n", - __func__, dev_handle, evtdata->reason_code); + dprint_event_bh(mrioc, + "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n", + dev_handle, evtdata->reason_code); switch (evtdata->reason_code) { case MPI3_EVENT_DEV_STAT_RC_HIDDEN: delete = 1; @@ -1030,19 +1382,24 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, } tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_bh(mrioc, + "processing device status change event bottom half,\n" + "cannot identify target device for handle(0x%04x), rc(0x%02x)\n", + dev_handle, evtdata->reason_code); goto out; + } if (uhide) { tgtdev->is_hidden = 0; if (!tgtdev->host_exposed) mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); } - if (tgtdev->starget && tgtdev->starget->hostdata) { - if (delete) - mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); - } + + if (delete) + mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); + if (cleanup) { - mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); mpi3mr_tgtdev_put(tgtdev); } @@ -1071,13 +1428,18 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, perst_id = le16_to_cpu(dev_pg0->persistent_id); dev_handle = le16_to_cpu(dev_pg0->dev_handle); - ioc_info(mrioc, - "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n", - __func__, dev_handle, perst_id); + dprint_event_bh(mrioc, + "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n", + dev_handle, perst_id); tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_bh(mrioc, + "cannot identify target device for device info\n" + "change event handle(0x%04x), perst_id(%d)\n", + dev_handle, perst_id); goto 
out; - mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); + } + mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false); if (!tgtdev->is_hidden && !tgtdev->host_exposed) mpi3mr_report_tgtdev_to_host(mrioc, perst_id); if (tgtdev->is_hidden && tgtdev->host_exposed) @@ -1091,6 +1453,191 @@ out: } /** + * mpi3mr_free_enclosure_list - release enclosures + * @mrioc: Adapter instance reference + * + * Free memory allocated during encloure add. + * + * Return nothing. + */ +void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next; + + list_for_each_entry_safe(enclosure_dev, + enclosure_dev_next, &mrioc->enclosure_list, list) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } +} + +/** + * mpi3mr_enclosure_find_by_handle - enclosure search by handle + * @mrioc: Adapter instance reference + * @handle: Firmware device handle of the enclosure + * + * This searches for enclosure device based on handle, then returns the + * enclosure object. + * + * Return: Enclosure object reference or NULL + */ +struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( + struct mpi3mr_ioc *mrioc, u16 handle) +{ + struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL; + + list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) { + if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle) + continue; + r = enclosure_dev; + goto out; + } +out: + return r; +} + +/** + * mpi3mr_process_trigger_data_event_bh - Process trigger event + * data + * @mrioc: Adapter instance reference + * @event_data: Event data + * + * This function releases diage buffers or issues diag fault + * based on trigger conditions + * + * Return: Nothing + */ +static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc, + struct trigger_event_data *event_data) +{ + struct diag_buffer_desc *trace_hdb = event_data->trace_hdb; + struct diag_buffer_desc *fw_hdb = event_data->fw_hdb; + unsigned long flags; + int retval = 0; + u8 trigger_type = event_data->trigger_type; + union mpi3mr_trigger_data *trigger_data = + &event_data->trigger_specific_data; + + if (event_data->snapdump) { + if (trace_hdb) + mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, + trigger_data, 1); + if (fw_hdb) + mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, + trigger_data, 1); + mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_TRIGGER, 1); + return; + } + + if (trace_hdb) { + retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb); + if (!retval) { + mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type, + trigger_data, 1); + } + spin_lock_irqsave(&mrioc->trigger_lock, flags); + mrioc->trace_release_trigger_active = false; + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } + if (fw_hdb) { + retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb); + if (!retval) { + mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type, + trigger_data, 1); + } + spin_lock_irqsave(&mrioc->trigger_lock, flags); + mrioc->fw_release_trigger_active = false; + spin_unlock_irqrestore(&mrioc->trigger_lock, flags); + } +} + +/** + * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event + * @mrioc: Adapter instance reference + * @encl_pg0: Enclosure page 0. + * @is_added: Added event or not + * + * Return nothing. 
+ */ +static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc, + struct mpi3_enclosure_page0 *encl_pg0, u8 is_added) +{ + char *reason_str = NULL; + + if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK)) + return; + + if (is_added) + reason_str = "enclosure added"; + else + reason_str = "enclosure dev status changed"; + + ioc_info(mrioc, + "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n", + reason_str, le16_to_cpu(encl_pg0->enclosure_handle), + (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id)); + ioc_info(mrioc, + "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n", + le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port, + le16_to_cpu(encl_pg0->flags), + ((le16_to_cpu(encl_pg0->flags) & + MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4)); +} + +/** + * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Prints information about the Enclosure device status or + * Enclosure add events if logging is enabled and add or remove + * the enclosure from the controller's internal list of + * enclosures. + * + * Return: Nothing. + */ +static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + struct mpi3mr_enclosure_node *enclosure_dev = NULL; + struct mpi3_enclosure_page0 *encl_pg0; + u16 encl_handle; + u8 added, present; + + encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data; + added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0; + mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added); + + + encl_handle = le16_to_cpu(encl_pg0->enclosure_handle); + present = ((le16_to_cpu(encl_pg0->flags) & + MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4); + + if (encl_handle) + enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, + encl_handle); + if (!enclosure_dev && present) { + enclosure_dev = + kzalloc(sizeof(struct mpi3mr_enclosure_node), + GFP_KERNEL); + if (!enclosure_dev) + return; + list_add_tail(&enclosure_dev->list, + &mrioc->enclosure_list); + } + if (enclosure_dev) { + if (!present) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } else + memcpy(&enclosure_dev->pg0, encl_pg0, + sizeof(enclosure_dev->pg0)); + + } +} + +/** * mpi3mr_sastopochg_evt_debug - SASTopoChange details * @mrioc: Adapter instance reference * @event_data: SAS topology change list event data @@ -1129,8 +1676,9 @@ mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc, ioc_info(mrioc, "%s :sas topology change: (%s)\n", __func__, status_str); ioc_info(mrioc, - "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n", + "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n", __func__, le16_to_cpu(event_data->expander_dev_handle), + event_data->io_unit_port, le16_to_cpu(event_data->enclosure_handle), event_data->start_phy_num, event_data->num_entries); for (i = 0; i < event_data->num_entries; i++) { @@ -1188,11 +1736,34 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc, int i; u16 handle; u8 reason_code; + u64 exp_sas_address = 0, parent_sas_address = 0; + struct mpi3mr_hba_port *hba_port = NULL; struct mpi3mr_tgt_dev *tgtdev = NULL; + struct mpi3mr_sas_node *sas_expander = NULL; + unsigned long flags; + u8 link_rate, prev_link_rate, parent_phy_number; mpi3mr_sastopochg_evt_debug(mrioc, event_data); + if (mrioc->sas_transport_enabled) { + hba_port = mpi3mr_get_hba_port_by_id(mrioc, + event_data->io_unit_port); + if 
(le16_to_cpu(event_data->expander_dev_handle)) { + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + sas_expander = __mpi3mr_expander_find_by_handle(mrioc, + le16_to_cpu(event_data->expander_dev_handle)); + if (sas_expander) { + exp_sas_address = sas_expander->sas_address; + hba_port = sas_expander->hba_port; + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + parent_sas_address = exp_sas_address; + } else + parent_sas_address = mrioc->sas_hba.sas_address; + } for (i = 0; i < event_data->num_entries; i++) { + if (fwevt->discard) + return; handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); if (!handle) continue; @@ -1207,15 +1778,40 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc, case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: if (tgtdev->host_exposed) mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); - mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); mpi3mr_tgtdev_put(tgtdev); break; + case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: + case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: + case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: + { + if (!mrioc->sas_transport_enabled || tgtdev->non_stl + || tgtdev->is_hidden) + break; + link_rate = event_data->phy_entry[i].link_rate >> 4; + prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; + if (link_rate == prev_link_rate) + break; + if (!parent_sas_address) + break; + parent_phy_number = event_data->start_phy_num + i; + mpi3mr_update_links(mrioc, parent_sas_address, handle, + parent_phy_number, link_rate, hba_port); + break; + } default: break; } if (tgtdev) mpi3mr_tgtdev_put(tgtdev); } + + if (mrioc->sas_transport_enabled && (event_data->exp_status == + MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) { + if (sas_expander) + mpi3mr_expander_remove(mrioc, exp_sas_address, + hba_port); + } } /** @@ -1324,6 +1920,8 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, mpi3mr_pcietopochg_evt_debug(mrioc, event_data); for (i = 0; i < event_data->num_entries; i++) { + if (fwevt->discard) + return; handle = le16_to_cpu(event_data->port_entry[i].attached_dev_handle); if (!handle) @@ -1338,7 +1936,7 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: if (tgtdev->host_exposed) mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); - mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); mpi3mr_tgtdev_put(tgtdev); break; default: @@ -1350,6 +1948,77 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, } /** + * mpi3mr_logdata_evt_bh - Log data event bottomhalf + * @mrioc: Adapter instance reference + * @fwevt: Firmware event reference + * + * Extracts the event data and calls application interfacing + * function to process the event further. + * + * Return: Nothing. + */ +static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, + struct mpi3mr_fwevt *fwevt) +{ + mpi3mr_app_save_logdata(mrioc, fwevt->event_data, + fwevt->event_data_size); +} + +/** + * mpi3mr_update_sdev_qd - Update SCSI device queue depath + * @sdev: SCSI device reference + * @data: Queue depth reference + * + * This is an iterator function called for each SCSI device in a + * target to update the QD of each SCSI device. + * + * Return: Nothing. 
+ */ +static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data) +{ + u16 *q_depth = (u16 *)data; + + scsi_change_queue_depth(sdev, (int)*q_depth); + sdev->max_queue_depth = sdev->queue_depth; +} + +/** + * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs + * @mrioc: Adapter instance reference + * @tg: Throttle group information pointer + * + * Accessor to reduce QD for each device associated with the + * given throttle group. + * + * Return: None. + */ +static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, + struct mpi3mr_throttle_group_info *tg) +{ + unsigned long flags; + struct mpi3mr_tgt_dev *tgtdev; + struct mpi3mr_stgt_priv_data *tgt_priv; + + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { + if (tgtdev->starget && tgtdev->starget->hostdata) { + tgt_priv = tgtdev->starget->hostdata; + if (tgt_priv->throttle_group == tg) { + dprint_event_bh(mrioc, + "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", + tgt_priv->perst_id, tgtdev->q_depth, + tg->modified_qd); + starget_for_each_device(tgtdev->starget, + (void *)&tg->modified_qd, + mpi3mr_update_sdev_qd); + } + } + } + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); +} + +/** * mpi3mr_fwevt_bh - Firmware event bottomhalf handler * @mrioc: Adapter instance reference * @fwevt: Firmware event reference @@ -1362,28 +2031,61 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, struct mpi3mr_fwevt *fwevt) { - mrioc->current_event = fwevt; + struct mpi3_device_page0 *dev_pg0 = NULL; + u16 perst_id, handle, dev_info; + struct mpi3_device0_sas_sata_format *sasinf = NULL; + unsigned int timeout; + mpi3mr_fwevt_del_from_list(mrioc, fwevt); + mrioc->current_event = fwevt; - if (mrioc->stop_drv_processing) + if (mrioc->stop_drv_processing) { + dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n" + "due to stop_drv_processing\n", fwevt->event_id); + goto out; + } + + if (mrioc->unrecoverable) { + dprint_event_bh(mrioc, + "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", + fwevt->event_id); goto out; + } if (!fwevt->process_evt) goto evt_ack; + dprint_event_bh(mrioc, "processing event(0x%02x) -(0x%08x) in the bottom half handler\n", + fwevt->event_id, fwevt->evt_ctx); + switch (fwevt->event_id) { case MPI3_EVENT_DEVICE_ADDED: { - struct mpi3_device_page0 *dev_pg0 = - (struct mpi3_device_page0 *)fwevt->event_data; - mpi3mr_report_tgtdev_to_host(mrioc, - le16_to_cpu(dev_pg0->persistent_id)); + dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; + perst_id = le16_to_cpu(dev_pg0->persistent_id); + handle = le16_to_cpu(dev_pg0->dev_handle); + if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) + mpi3mr_report_tgtdev_to_host(mrioc, perst_id); + else if (mrioc->sas_transport_enabled && + (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { + sasinf = &dev_pg0->device_specific.sas_sata_format; + dev_info = le16_to_cpu(sasinf->device_info); + if (!mrioc->sas_hba.num_phys) + mpi3mr_sas_host_add(mrioc); + else + mpi3mr_sas_host_refresh(mrioc); + + if (mpi3mr_is_expander_device(dev_info)) + mpi3mr_expander_add(mrioc, handle); + } break; } case MPI3_EVENT_DEVICE_INFO_CHANGED: { - mpi3mr_devinfochg_evt_bh(mrioc, - (struct mpi3_device_page0 *)fwevt->event_data); + dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; + perst_id = le16_to_cpu(dev_pg0->persistent_id); + if (perst_id != 
MPI3_DEVICE0_PERSISTENTID_INVALID) + mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); break; } case MPI3_EVENT_DEVICE_STATUS_CHANGE: @@ -1391,6 +2093,13 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); break; } + case MPI3_EVENT_ENCL_DEVICE_ADDED: + case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: + { + mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); + break; + } + case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: { mpi3mr_sastopochg_evt_bh(mrioc, fwevt); @@ -1401,6 +2110,57 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); break; } + case MPI3_EVENT_LOG_DATA: + { + mpi3mr_logdata_evt_bh(mrioc, fwevt); + break; + } + case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: + { + struct mpi3mr_throttle_group_info *tg; + + tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; + dprint_event_bh(mrioc, + "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", + tg->id, tg->need_qd_reduction); + if (tg->need_qd_reduction) { + mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); + tg->need_qd_reduction = 0; + } + break; + } + case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: + { + timeout = MPI3MR_RESET_TIMEOUT * 2; + while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) && + !mrioc->unrecoverable && !mrioc->pci_err_recovery) { + msleep(500); + if (!timeout--) { + mrioc->unrecoverable = 1; + break; + } + } + + if (mrioc->unrecoverable || mrioc->pci_err_recovery) + break; + + dprint_event_bh(mrioc, + "scan for non responding and newly added devices after soft reset started\n"); + if (mrioc->sas_transport_enabled) { + mpi3mr_refresh_sas_ports(mrioc); + mpi3mr_refresh_expanders(mrioc); + } + mpi3mr_refresh_tgtdevs(mrioc); + ioc_info(mrioc, + "scan for non responding and newly added devices after soft reset completed\n"); + break; + } + case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER: + { + mpi3mr_process_trigger_data_event_bh(mrioc, + (struct trigger_event_data *)fwevt->event_data); + break; + } default: break; } @@ -1453,17 +2213,26 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, int retval = 0; struct mpi3mr_tgt_dev *tgtdev = NULL; u16 perst_id = 0; + unsigned long flags; perst_id = le16_to_cpu(dev_pg0->persistent_id); - tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); + if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) + return retval; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); + if (tgtdev) + tgtdev->state = MPI3MR_DEV_CREATED; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); + if (tgtdev) { - mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); + mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); mpi3mr_tgtdev_put(tgtdev); } else { tgtdev = mpi3mr_alloc_tgtdev(); if (!tgtdev) return -ENOMEM; - mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); + mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); } @@ -1517,6 +2286,9 @@ static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto clear_drv_cmd; + ioc_info(mrioc, "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, @@ -1557,6 +2329,8 @@ static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, kfree(delayed_dev_rmhs); return; } + +clear_drv_cmd: drv_cmd->state = MPI3MR_CMD_NOTUSED; 
drv_cmd->callback = NULL; drv_cmd->retry_count = 0; @@ -1583,6 +2357,9 @@ static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; int retval; + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto clear_drv_cmd; + if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; @@ -1611,11 +2388,11 @@ static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, if (retval) { pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", mrioc->name); - goto out_failed; + goto clear_drv_cmd; } return; -out_failed: +clear_drv_cmd: drv_cmd->state = MPI3MR_CMD_NOTUSED; drv_cmd->callback = NULL; drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; @@ -1645,6 +2422,14 @@ static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, u8 retrycount = 5; struct mpi3mr_drv_cmd *drv_cmd = cmdparam; struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; + struct mpi3mr_tgt_dev *tgtdev = NULL; + unsigned long flags; + + spin_lock_irqsave(&mrioc->tgtdev_lock, flags); + tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); + if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) + tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; + spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); if (drv_cmd) goto issue_cmd; @@ -1730,6 +2515,9 @@ static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; struct delayed_evt_ack_node *delayed_evtack = NULL; + if (drv_cmd->state & MPI3MR_CMD_RESET) + goto clear_drv_cmd; + if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { dprint_event_th(mrioc, "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", @@ -1747,6 +2535,7 @@ static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, kfree(delayed_evtack); return; } +clear_drv_cmd: drv_cmd->state = MPI3MR_CMD_NOTUSED; drv_cmd->callback = NULL; clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); @@ -2007,6 +2796,9 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, goto out; dev_handle = le16_to_cpu(evtdata->dev_handle); + dprint_event_th(mrioc, + "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", + evtdata->reason_code, dev_handle); switch (evtdata->reason_code) { case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: @@ -2030,8 +2822,12 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, } tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_th(mrioc, + "processing device status change event could not identify device for handle(0x%04x)\n", + dev_handle); goto out; + } if (hide) tgtdev->is_hidden = hide; if (tgtdev->starget && tgtdev->starget->hostdata) { @@ -2076,12 +2872,14 @@ static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, "prepare for reset event top half with rc=start\n"); if (mrioc->prepare_for_reset) return; + scsi_block_requests(mrioc->shost); mrioc->prepare_for_reset = 1; mrioc->prepare_for_reset_timeout_counter = 0; } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { dprint_event_th(mrioc, "prepare for reset top half with rc=abort\n"); mrioc->prepare_for_reset = 0; + scsi_unblock_requests(mrioc->shost); mrioc->prepare_for_reset_timeout_counter = 0; } if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) @@ -2107,43 +2905,19 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); if (shutdown_timeout <= 0) { 
- ioc_warn(mrioc, + dprint_event_th(mrioc, "%s :Invalid Shutdown Timeout received = %d\n", __func__, shutdown_timeout); return; } - ioc_info(mrioc, + dprint_event_th(mrioc, "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); mrioc->facts.shutdown_timeout = shutdown_timeout; } /** - * mpi3mr_tempthreshold_evt_th - Temp threshold event tophalf - * @mrioc: Adapter instance reference - * @event_reply: event data - * - * Displays temperature threshold event details and fault code - * if any is hit due to temperature exceeding threshold. - * - * Return: Nothing - */ -static void mpi3mr_tempthreshold_evt_th(struct mpi3mr_ioc *mrioc, - struct mpi3_event_notification_reply *event_reply) -{ - struct mpi3_event_data_temp_threshold *evtdata = - (struct mpi3_event_data_temp_threshold *)event_reply->event_data; - - ioc_err(mrioc, "Temperature threshold levels %s%s%s exceeded for sensor: %d !!! Current temperature in Celsius: %d\n", - (le16_to_cpu(evtdata->status) & 0x1) ? "Warning " : " ", - (le16_to_cpu(evtdata->status) & 0x2) ? "Critical " : " ", - (le16_to_cpu(evtdata->status) & 0x4) ? "Fatal " : " ", evtdata->sensor_num, - le16_to_cpu(evtdata->current_temperature)); - mpi3mr_print_fault_info(mrioc); -} - -/** * mpi3mr_cablemgmt_evt_th - Cable management event tophalf * @mrioc: Adapter instance reference * @event_reply: event data @@ -2180,13 +2954,42 @@ static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, } /** + * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event + * @mrioc: Adapter instance reference + * + * Add driver specific event to make sure that the driver won't process the + * events until all the devices are refreshed during soft reset. + * + * Return: Nothing + */ +void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) +{ + struct mpi3mr_fwevt *fwevt = NULL; + + fwevt = mpi3mr_alloc_fwevt(0); + if (!fwevt) { + dprint_event_th(mrioc, + "failed to schedule bottom half handler for event(0x%02x)\n", + MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); + return; + } + fwevt->mrioc = mrioc; + fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; + fwevt->send_ack = 0; + fwevt->process_evt = 1; + fwevt->evt_ctx = 0; + fwevt->event_data_size = 0; + mpi3mr_fwevt_add_to_list(mrioc, fwevt); +} + +/** * mpi3mr_os_handle_events - Firmware event handler * @mrioc: Adapter instance reference * @event_reply: event data * - * Identify whteher the event has to handled and acknowledged - * and either process the event in the tophalf and/or schedule a - * bottom half through mpi3mr_fwevt_worker. + * Identifies whether the event has to be handled and acknowledged, + * and either processes the event in the top-half and/or schedule a + * bottom-half through mpi3mr_fwevt_worker(). 
* * Return: Nothing */ @@ -2205,6 +3008,7 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, ack_req = 1; evt_type = event_reply->event; + mpi3mr_event_trigger(mrioc, event_reply->event); switch (evt_type) { case MPI3_EVENT_DEVICE_ADDED: @@ -2212,9 +3016,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, struct mpi3_device_page0 *dev_pg0 = (struct mpi3_device_page0 *)event_reply->event_data; if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) - ioc_err(mrioc, - "%s :Failed to add device in the device add event\n", - __func__); + dprint_event_th(mrioc, + "failed to process device added event for handle(0x%04x),\n" + "perst_id(%d) in the event top half handler\n", + le16_to_cpu(dev_pg0->dev_handle), + le16_to_cpu(dev_pg0->persistent_id)); else process_evt_bh = 1; break; @@ -2243,7 +3049,15 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, ack_req = 0; break; } + case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE: + { + mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply); + break; + } case MPI3_EVENT_DEVICE_INFO_CHANGED: + case MPI3_EVENT_LOG_DATA: + case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: + case MPI3_EVENT_ENCL_DEVICE_ADDED: { process_evt_bh = 1; break; @@ -2253,17 +3067,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, mpi3mr_energypackchg_evt_th(mrioc, event_reply); break; } - case MPI3_EVENT_TEMP_THRESHOLD: - { - mpi3mr_tempthreshold_evt_th(mrioc, event_reply); - break; - } case MPI3_EVENT_CABLE_MGMT: { mpi3mr_cablemgmt_evt_th(mrioc, event_reply); break; } - case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: case MPI3_EVENT_SAS_DISCOVERY: case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: @@ -2275,11 +3083,15 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, break; } if (process_evt_bh || ack_req) { + dprint_event_th(mrioc, + "scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n", + evt_type, le32_to_cpu(event_reply->event_context), ack_req); sz = event_reply->event_data_length * 4; fwevt = mpi3mr_alloc_fwevt(sz); if (!fwevt) { - ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", - __func__, __FILE__, __LINE__, __func__); + dprint_event_th(mrioc, + "failed to schedule bottom half handler for\n" + "event(0x%02x), ack_required=%d\n", evt_type, ack_req); return; } @@ -2491,6 +3303,11 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, u32 xfer_count = 0, sense_count = 0, resp_data = 0; u16 dev_handle = 0xFFFF; struct scsi_sense_hdr sshdr; + struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; + struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; + u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; + struct mpi3mr_throttle_group_info *tg = NULL; + u8 throttle_enabled_dev = 0; *reply_dma = 0; reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & @@ -2504,6 +3321,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; @@ -2532,6 +3350,12 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) panic("%s: Ran out of sense buffers\n", mrioc->name); + if (sense_buf) { + scsi_normalize_sense(sense_buf, sense_count, 
&sshdr); + mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key, + sshdr.asc, sshdr.ascq); + } + mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo); break; case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; @@ -2547,10 +3371,58 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, goto out; } priv = scsi_cmd_priv(scmd); + + data_len_blks = scsi_bufflen(scmd) >> 9; + sdev_priv_data = scmd->device->hostdata; + if (sdev_priv_data) { + stgt_priv_data = sdev_priv_data->tgt_priv_data; + if (stgt_priv_data) { + tg = stgt_priv_data->throttle_group; + throttle_enabled_dev = + stgt_priv_data->io_throttle_enabled; + dev_handle = stgt_priv_data->dev_handle; + } + } + if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && + throttle_enabled_dev)) { + ioc_pend_data_len = atomic_sub_return(data_len_blks, + &mrioc->pend_large_data_sz); + if (tg) { + tg_pend_data_len = atomic_sub_return(data_len_blks, + &tg->pend_large_data_sz); + if (tg->io_divert && ((ioc_pend_data_len <= + mrioc->io_throttle_low) && + (tg_pend_data_len <= tg->low))) { + tg->io_divert = 0; + mpi3mr_set_io_divert_for_all_vd_in_tg( + mrioc, tg, 0); + } + } else { + if (ioc_pend_data_len <= mrioc->io_throttle_low) + stgt_priv_data->io_divert = 0; + } + } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { + ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); + if (!tg) { + if (ioc_pend_data_len <= mrioc->io_throttle_low) + stgt_priv_data->io_divert = 0; + + } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { + tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); + if (tg->io_divert && (tg_pend_data_len <= tg->low)) { + tg->io_divert = 0; + mpi3mr_set_io_divert_for_all_vd_in_tg( + mrioc, tg, 0); + } + } + } + if (success_desc) { scmd->result = DID_OK << 16; goto out_success; } + + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || @@ -2573,7 +3445,18 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, scmd->result = DID_NO_CONNECT << 16; break; case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: - scmd->result = DID_SOFT_ERROR << 16; + if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) { + /* + * This is a ATA NCQ command aborted due to another NCQ + * command failure. We must retry this command + * immediately but without incrementing its retry + * counter. 
+ */ + WARN_ON_ONCE(xfer_count != 0); + scmd->result = DID_IMM_RETRY << 16; + } else { + scmd->result = DID_SOFT_ERROR << 16; + } break; case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: @@ -2633,7 +3516,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, } if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && - (scmd->cmnd[0] != ATA_16)) { + (scmd->cmnd[0] != ATA_16) && + mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, scmd->result); scsi_print_command(scmd); @@ -2678,19 +3562,19 @@ static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) { u8 retry_count = 5; int cmd_idx = -1; + unsigned long flags; + spin_lock_irqsave(&mrioc->chain_buf_lock, flags); do { - spin_lock(&mrioc->chain_buf_lock); cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, mrioc->chain_buf_count); if (cmd_idx < mrioc->chain_buf_count) { set_bit(cmd_idx, mrioc->chain_bitmap); - spin_unlock(&mrioc->chain_buf_lock); break; } - spin_unlock(&mrioc->chain_buf_lock); cmd_idx = -1; } while (retry_count--); + spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); return cmd_idx; } @@ -2750,6 +3634,17 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, scmd->sc_data_direction); priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ } else { + /* + * Some firmware versions byte-swap the REPORT ZONES command + * reply from ATA-ZAC devices by directly accessing in the host + * buffer. This does not respect the default command DMA + * direction and causes IOMMU page faults on some architectures + * with an IOMMU enforcing write mappings (e.g. AMD hosts). + * Avoid such issue by making the REPORT ZONES buffer mapping + * bi-directional. + */ + if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) + scmd->sc_data_direction = DMA_BIDIRECTIONAL; sg_scmd = scsi_sglist(scmd); sges_left = scsi_dma_map(scmd); } @@ -2760,7 +3655,7 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, scsi_bufflen(scmd)); return -ENOMEM; } - if (sges_left > MPI3MR_SG_DEPTH) { + if (sges_left > mrioc->max_sgl_entries) { sdev_printk(KERN_ERR, scmd->device, "scsi_dma_map returned unsupported sge count %d!\n", sges_left); @@ -2945,7 +3840,7 @@ inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) * * Return: 0 on success, non-zero on errors */ -static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, +int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, u16 handle, uint lun, u16 htag, ulong timeout, struct mpi3mr_drv_cmd *drv_cmd, u8 *resp_code, struct scsi_cmnd *scmd) @@ -2982,6 +3877,13 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, mutex_unlock(&drv_cmd->mutex); goto out; } + if (mrioc->block_on_pci_err) { + retval = -1; + dprint_tm(mrioc, "sending task management failed due to\n" + "pci error recovery in progress\n"); + mutex_unlock(&drv_cmd->mutex); + goto out; + } drv_cmd->state = MPI3MR_CMD_PENDING; drv_cmd->is_waiting = 1; @@ -2996,6 +3898,18 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); if (scmd) { + if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + cmd_priv = scsi_cmd_priv(scmd); + if (!cmd_priv) + goto out_unlock; + + struct op_req_qinfo *op_req_q; + + op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx]; + tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag); + tm_req.task_request_queue_id = + cpu_to_le16(op_req_q->qid); + } sdev = scmd->device; sdev_priv_data = sdev->hostdata; 
scsi_tgt_priv_data = ((sdev_priv_data) ? @@ -3009,11 +3923,13 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, if (scsi_tgt_priv_data) atomic_inc(&scsi_tgt_priv_data->block_io); - if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { - if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) - timeout = tgtdev->dev_spec.pcie_inf.abort_to; - else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) - timeout = tgtdev->dev_spec.pcie_inf.reset_to; + if (tgtdev) { + if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) + timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to + : tgtdev->dev_spec.pcie_inf.reset_to; + else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD) + timeout = cmd_priv ? tgtdev->dev_spec.vd_inf.abort_to + : tgtdev->dev_spec.vd_inf.reset_to; } init_completion(&drv_cmd->done); @@ -3087,6 +4003,7 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, mpi3mr_poll_pend_io_completions(mrioc); mpi3mr_ioc_enable_intr(mrioc); mpi3mr_poll_pend_io_completions(mrioc); + mpi3mr_process_admin_reply_q(mrioc); } switch (tm_type) { case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: @@ -3107,6 +4024,8 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, default: break; } + mpi3mr_global_trigger(mrioc, + MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED); out_unlock: drv_cmd->state = MPI3MR_CMD_NOTUSED; @@ -3122,7 +4041,7 @@ out: /** * mpi3mr_bios_param - BIOS param callback * @sdev: SCSI device reference - * @bdev: Block device reference + * @unused: gendisk reference * @capacity: Capacity in logical sectors * @params: Parameter array * @@ -3131,7 +4050,7 @@ out: * Return: 0 always */ static int mpi3mr_bios_param(struct scsi_device *sdev, - struct block_device *bdev, sector_t capacity, int params[]) + struct gendisk *unused, sector_t capacity, int params[]) { int heads; int sectors; @@ -3167,7 +4086,7 @@ static int mpi3mr_bios_param(struct scsi_device *sdev, * * Return: return zero. */ -static int mpi3mr_map_queues(struct Scsi_Host *shost) +static void mpi3mr_map_queues(struct Scsi_Host *shost) { struct mpi3mr_ioc *mrioc = shost_priv(shost); int i, qoff, offset; @@ -3196,16 +4115,13 @@ static int mpi3mr_map_queues(struct Scsi_Host *shost) */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL) - blk_mq_pci_map_queues(map, mrioc->pdev, offset); + blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset); else blk_mq_map_queues(map); qoff += map->nr_queues; offset += map->nr_queues; } - - return 0; - } /** @@ -3283,23 +4199,91 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) } /** + * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same + * @mrioc: Adapter instance reference + * @scmd: SCSI command reference + * @scsiio_req: MPI3 SCSI IO request + * @scsiio_flags: Pointer to MPI3 SCSI IO Flags + * @wslen: write same max length + * + * Gets values of unmap, ndob and number of blocks from write + * same scsi io and based on these values it sets divert IO flag + * and reason for diverting IO to firmware. 
+ * + * Return: Nothing + */ +static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc, + struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req, + u32 *scsiio_flags, u16 wslen) +{ + u8 unmap = 0, ndob = 0; + u8 opcode = scmd->cmnd[0]; + u32 num_blocks = 0; + u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]); + + if (opcode == WRITE_SAME_16) { + unmap = scmd->cmnd[1] & 0x08; + ndob = scmd->cmnd[1] & 0x01; + num_blocks = get_unaligned_be32(scmd->cmnd + 10); + } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) { + unmap = scmd->cmnd[10] & 0x08; + ndob = scmd->cmnd[10] & 0x01; + num_blocks = get_unaligned_be32(scmd->cmnd + 28); + } else + return; + + if ((unmap) && (ndob) && (num_blocks > wslen)) { + scsiio_req->msg_flags |= + MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; + *scsiio_flags |= + MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE; + } +} + +/** * mpi3mr_eh_host_reset - Host reset error handling callback * @scmd: SCSI command reference * - * Issue controller reset if the scmd is for a Physical Device, - * if the scmd is for RAID volume, then wait for - * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any - * pending I/Os prior to issuing reset to the controller. + * Issue controller reset * * Return: SUCCESS of successful reset else FAILED */ static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) { struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); + int retval = FAILED, ret; + + ret = mpi3mr_soft_reset_handler(mrioc, + MPI3MR_RESET_FROM_EH_HOS, 1); + if (ret) + goto out; + + retval = SUCCESS; +out: + sdev_printk(KERN_INFO, scmd->device, + "Host reset is %s for scmd(%p)\n", + ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + return retval; +} + +/** + * mpi3mr_eh_bus_reset - Bus reset error handling callback + * @scmd: SCSI command reference + * + * Checks whether pending I/Os are present for the RAID volume; + * if not there's no need to reset the adapter. + * + * Return: SUCCESS of successful reset else FAILED + */ +static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); struct mpi3mr_stgt_priv_data *stgt_priv_data; struct mpi3mr_sdev_priv_data *sdev_priv_data; u8 dev_type = MPI3_DEVICE_DEVFORM_VD; - int retval = FAILED, ret; + int retval = FAILED; + unsigned int timeout = MPI3MR_RESET_TIMEOUT; sdev_priv_data = scmd->device->hostdata; if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { @@ -3309,25 +4293,28 @@ static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) if (dev_type == MPI3_DEVICE_DEVFORM_VD) { mpi3mr_wait_for_host_io(mrioc, - MPI3MR_RAID_ERRREC_RESET_TIMEOUT); + MPI3MR_RAID_ERRREC_RESET_TIMEOUT); if (!mpi3mr_get_fw_pending_ios(mrioc)) { + while (mrioc->reset_in_progress || + mrioc->prepare_for_reset || + mrioc->block_on_pci_err) { + ssleep(1); + if (!timeout--) { + retval = FAILED; + goto out; + } + } retval = SUCCESS; goto out; } } + if (retval == FAILED) + mpi3mr_print_pending_host_io(mrioc); - mpi3mr_print_pending_host_io(mrioc); - ret = mpi3mr_soft_reset_handler(mrioc, - MPI3MR_RESET_FROM_EH_HOS, 1); - if (ret) - goto out; - - retval = SUCCESS; out: sdev_printk(KERN_INFO, scmd->device, - "Host reset is %s for scmd(%p)\n", - ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); - + "Bus reset is %s for scmd(%p)\n", + ((retval == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); return retval; } @@ -3365,10 +4352,14 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) stgt_priv_data = sdev_priv_data->tgt_priv_data; dev_handle = stgt_priv_data->dev_handle; if (stgt_priv_data->dev_removed) { + struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); sdev_printk(KERN_INFO, scmd->device, "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", mrioc->name, dev_handle); - retval = FAILED; + if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) + retval = SUCCESS; + else + retval = FAILED; goto out; } sdev_printk(KERN_INFO, scmd->device, @@ -3386,7 +4377,7 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) if (stgt_priv_data->pend_count) { sdev_printk(KERN_INFO, scmd->device, "%s: target has %d pending commands, target reset is failed\n", - mrioc->name, sdev_priv_data->pend_count); + mrioc->name, stgt_priv_data->pend_count); goto out; } @@ -3433,10 +4424,14 @@ static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) stgt_priv_data = sdev_priv_data->tgt_priv_data; dev_handle = stgt_priv_data->dev_handle; if (stgt_priv_data->dev_removed) { + struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); sdev_printk(KERN_INFO, scmd->device, "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", mrioc->name, dev_handle); - retval = FAILED; + if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) + retval = SUCCESS; + else + retval = FAILED; goto out; } sdev_printk(KERN_INFO, scmd->device, @@ -3466,6 +4461,92 @@ out: } /** + * mpi3mr_eh_abort - Callback function for abort error handling + * @scmd: SCSI command reference + * + * Issues Abort Task Management if the command is in LLD scope + * and verifies if it is aborted successfully, and return status + * accordingly. 
+ * + * Return: SUCCESS if the abort was successful, otherwise FAILED + */ +static int mpi3mr_eh_abort(struct scsi_cmnd *scmd) +{ + struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); + struct mpi3mr_stgt_priv_data *stgt_priv_data; + struct mpi3mr_sdev_priv_data *sdev_priv_data; + struct scmd_priv *cmd_priv; + u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT; + u8 resp_code = 0; + int retval = FAILED, ret = 0; + struct request *rq = scsi_cmd_to_rq(scmd); + unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc); + unsigned long scmd_age_sec = scmd_age_ms / HZ; + + sdev_printk(KERN_INFO, scmd->device, + "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd); + + sdev_printk(KERN_INFO, scmd->device, + "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n", + mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ, + scmd->retries, scmd->allowed); + + scsi_print_command(scmd); + + sdev_priv_data = scmd->device->hostdata; + if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { + sdev_printk(KERN_INFO, scmd->device, + "%s: Device not available, Skip issuing abort task\n", + mrioc->name); + retval = SUCCESS; + goto out; + } + + stgt_priv_data = sdev_priv_data->tgt_priv_data; + dev_handle = stgt_priv_data->dev_handle; + + cmd_priv = scsi_cmd_priv(scmd); + if (!cmd_priv->in_lld_scope || + cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) { + sdev_printk(KERN_INFO, scmd->device, + "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n", + mrioc->name, scmd); + retval = SUCCESS; + goto out; + } + + if (stgt_priv_data->dev_removed) { + sdev_printk(KERN_INFO, scmd->device, + "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n", + mrioc->name, dev_handle); + retval = FAILED; + goto out; + } + + ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, + timeout, &mrioc->host_tm_cmds, &resp_code, scmd); + + if (ret) + goto out; + + if (cmd_priv->in_lld_scope) { + sdev_printk(KERN_INFO, scmd->device, + "%s: Abort task failed. scmd (0x%p) was not terminated\n", + mrioc->name, scmd); + goto out; + } + + retval = SUCCESS; +out: + sdev_printk(KERN_INFO, scmd->device, + "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name, + ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd); + + return retval; +} + +/** * mpi3mr_scan_start - Scan start callback handler * @shost: SCSI host reference * @@ -3538,25 +4619,27 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost, mpi3mr_start_watchdog(mrioc); mrioc->is_driver_loading = 0; + mrioc->stop_bsgs = 0; return 1; } /** - * mpi3mr_slave_destroy - Slave destroy callback handler + * mpi3mr_sdev_destroy - Slave destroy callback handler * @sdev: SCSI device reference * * Cleanup and free per device(lun) private data. * * Return: Nothing. 
*/ -static void mpi3mr_slave_destroy(struct scsi_device *sdev) +static void mpi3mr_sdev_destroy(struct scsi_device *sdev) { struct Scsi_Host *shost; struct mpi3mr_ioc *mrioc; struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; - struct mpi3mr_tgt_dev *tgt_dev; + struct mpi3mr_tgt_dev *tgt_dev = NULL; unsigned long flags; struct scsi_target *starget; + struct sas_rphy *rphy = NULL; if (!sdev->hostdata) return; @@ -3569,7 +4652,14 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev) scsi_tgt_priv_data->num_luns--; spin_lock_irqsave(&mrioc->tgtdev_lock, flags); - tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + if (starget->channel == mrioc->scsi_device_channel) + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + } + if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) tgt_dev->starget = NULL; if (tgt_dev) @@ -3621,77 +4711,71 @@ static void mpi3mr_target_destroy(struct scsi_target *starget) } /** - * mpi3mr_slave_configure - Slave configure callback handler + * mpi3mr_sdev_configure - Slave configure callback handler * @sdev: SCSI device reference + * @lim: queue limits * * Configure queue depth, max hardware sectors and virt boundary * as required * * Return: 0 always. */ -static int mpi3mr_slave_configure(struct scsi_device *sdev) +static int mpi3mr_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct scsi_target *starget; struct Scsi_Host *shost; struct mpi3mr_ioc *mrioc; - struct mpi3mr_tgt_dev *tgt_dev; + struct mpi3mr_tgt_dev *tgt_dev = NULL; unsigned long flags; int retval = 0; + struct sas_rphy *rphy = NULL; starget = scsi_target(sdev); shost = dev_to_shost(&starget->dev); mrioc = shost_priv(shost); spin_lock_irqsave(&mrioc->tgtdev_lock, flags); - tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + if (starget->channel == mrioc->scsi_device_channel) + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + } spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); if (!tgt_dev) return -ENXIO; mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); - switch (tgt_dev->dev_type) { - case MPI3_DEVICE_DEVFORM_PCIE: - /*The block layer hw sector size = 512*/ - if ((tgt_dev->dev_spec.pcie_inf.dev_info & - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == - MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { - blk_queue_max_hw_sectors(sdev->request_queue, - tgt_dev->dev_spec.pcie_inf.mdts / 512); - if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) - blk_queue_virt_boundary(sdev->request_queue, - ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); - else - blk_queue_virt_boundary(sdev->request_queue, - ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); - } - break; - default: - break; - } - mpi3mr_tgtdev_put(tgt_dev); + sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; + blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); + mpi3mr_configure_tgt_dev(tgt_dev, lim); + mpi3mr_tgtdev_put(tgt_dev); return retval; } /** - * mpi3mr_slave_alloc -Slave alloc callback handler + * mpi3mr_sdev_init -Slave alloc callback handler * @sdev: SCSI device reference * * Allocate per device(lun) private data and initialize it. 
* * Return: 0 on success -ENOMEM on memory allocation failure. */ -static int mpi3mr_slave_alloc(struct scsi_device *sdev) +static int mpi3mr_sdev_init(struct scsi_device *sdev) { struct Scsi_Host *shost; struct mpi3mr_ioc *mrioc; struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; - struct mpi3mr_tgt_dev *tgt_dev; + struct mpi3mr_tgt_dev *tgt_dev = NULL; struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; unsigned long flags; struct scsi_target *starget; int retval = 0; + struct sas_rphy *rphy = NULL; starget = scsi_target(sdev); shost = dev_to_shost(&starget->dev); @@ -3699,7 +4783,14 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev) scsi_tgt_priv_data = starget->hostdata; spin_lock_irqsave(&mrioc->tgtdev_lock, flags); - tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + + if (starget->channel == mrioc->scsi_device_channel) + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + } if (tgt_dev) { if (tgt_dev->starget == NULL) @@ -3742,6 +4833,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget) struct mpi3mr_tgt_dev *tgt_dev; unsigned long flags; int retval = 0; + struct sas_rphy *rphy = NULL; scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); if (!scsi_tgt_priv_data) @@ -3750,18 +4842,50 @@ static int mpi3mr_target_alloc(struct scsi_target *starget) starget->hostdata = scsi_tgt_priv_data; spin_lock_irqsave(&mrioc->tgtdev_lock, flags); - tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); - if (tgt_dev && !tgt_dev->is_hidden) { - scsi_tgt_priv_data->starget = starget; - scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; - scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; - scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; - scsi_tgt_priv_data->tgt_dev = tgt_dev; - tgt_dev->starget = starget; - atomic_set(&scsi_tgt_priv_data->block_io, 0); - retval = 0; - } else - retval = -ENXIO; + if (starget->channel == mrioc->scsi_device_channel) { + tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); + if (tgt_dev && !tgt_dev->is_hidden) { + scsi_tgt_priv_data->starget = starget; + scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; + scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; + scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; + scsi_tgt_priv_data->tgt_dev = tgt_dev; + tgt_dev->starget = starget; + atomic_set(&scsi_tgt_priv_data->block_io, 0); + retval = 0; + if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) && + ((tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == + MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && + ((tgt_dev->dev_spec.pcie_inf.dev_info & + MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) != + MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0)) + scsi_tgt_priv_data->dev_nvme_dif = 1; + scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; + scsi_tgt_priv_data->wslen = tgt_dev->wslen; + if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD) + scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg; + } else + retval = -ENXIO; + } else if (mrioc->sas_transport_enabled && !starget->channel) { + rphy = dev_to_rphy(starget->dev.parent); + tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, + rphy->identify.sas_address, rphy); + if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl && + (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) { + 
scsi_tgt_priv_data->starget = starget; + scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; + scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; + scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; + scsi_tgt_priv_data->tgt_dev = tgt_dev; + scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; + scsi_tgt_priv_data->wslen = tgt_dev->wslen; + tgt_dev->starget = starget; + atomic_set(&scsi_tgt_priv_data->block_io, 0); + retval = 0; + } else + retval = -ENXIO; + } spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); return retval; @@ -3915,10 +5039,19 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, int retval = 0; u16 dev_handle; u16 host_tag; - u32 scsiio_flags = 0; + u32 scsiio_flags = 0, data_len_blks = 0; struct request *rq = scsi_cmd_to_rq(scmd); int iprio_class; u8 is_pcie_dev = 0; + u32 tracked_io_sz = 0; + u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; + struct mpi3mr_throttle_group_info *tg = NULL; + + if (mrioc->unrecoverable) { + scmd->result = DID_ERROR << 16; + scsi_done(scmd); + goto out; + } sdev_priv_data = scmd->device->hostdata; if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { @@ -3934,22 +5067,22 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, goto out; } - if (mrioc->reset_in_progress) { - retval = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - stgt_priv_data = sdev_priv_data->tgt_priv_data; - dev_handle = stgt_priv_data->dev_handle; - if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { - scmd->result = DID_NO_CONNECT << 16; + + /* Avoid error handling escalation when device is removed or blocked */ + + if (scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY && + (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); scsi_done(scmd); goto out; } - if (stgt_priv_data->dev_removed) { - scmd->result = DID_NO_CONNECT << 16; - scsi_done(scmd); + + if (mrioc->reset_in_progress || mrioc->prepare_for_reset + || mrioc->block_on_pci_err) { + retval = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -3963,6 +5096,17 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, goto out; } + if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (stgt_priv_data->dev_removed) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) is_pcie_dev = 1; if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && @@ -4003,6 +5147,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); + if (stgt_priv_data->wslen) + mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, + stgt_priv_data->wslen); + memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); scsiio_req->dev_handle = cpu_to_le16(dev_handle); @@ -4016,11 +5164,50 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, goto out; } op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; + data_len_blks = scsi_bufflen(scmd) >> 9; + if ((data_len_blks >= mrioc->io_throttle_data_length) && + stgt_priv_data->io_throttle_enabled) { + tracked_io_sz = data_len_blks; + tg = stgt_priv_data->throttle_group; + if (tg) { + ioc_pend_data_len = atomic_add_return(data_len_blks, + &mrioc->pend_large_data_sz); + tg_pend_data_len = atomic_add_return(data_len_blks, + &tg->pend_large_data_sz); + if (!tg->io_divert && ((ioc_pend_data_len >= + mrioc->io_throttle_high) || + (tg_pend_data_len >= 
tg->high))) { + tg->io_divert = 1; + tg->need_qd_reduction = 1; + mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, + tg, 1); + mpi3mr_queue_qd_reduction_event(mrioc, tg); + } + } else { + ioc_pend_data_len = atomic_add_return(data_len_blks, + &mrioc->pend_large_data_sz); + if (ioc_pend_data_len >= mrioc->io_throttle_high) + stgt_priv_data->io_divert = 1; + } + } + + if (stgt_priv_data->io_divert) { + scsiio_req->msg_flags |= + MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; + scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; + } + scsiio_req->flags |= cpu_to_le32(scsiio_flags); if (mpi3mr_op_request_post(mrioc, op_req_q, scmd_priv_data->mpi3mr_scsiio_req)) { mpi3mr_clear_scmd_priv(mrioc, scmd); retval = SCSI_MLQUEUE_HOST_BUSY; + if (tracked_io_sz) { + atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); + if (tg) + atomic_sub(tracked_io_sz, + &tg->pend_large_data_sz); + } goto out; } @@ -4028,21 +5215,23 @@ out: return retval; } -static struct scsi_host_template mpi3mr_driver_template = { +static const struct scsi_host_template mpi3mr_driver_template = { .module = THIS_MODULE, .name = "MPI3 Storage Controller", .proc_name = MPI3MR_DRIVER_NAME, .queuecommand = mpi3mr_qcmd, .target_alloc = mpi3mr_target_alloc, - .slave_alloc = mpi3mr_slave_alloc, - .slave_configure = mpi3mr_slave_configure, + .sdev_init = mpi3mr_sdev_init, + .sdev_configure = mpi3mr_sdev_configure, .target_destroy = mpi3mr_target_destroy, - .slave_destroy = mpi3mr_slave_destroy, + .sdev_destroy = mpi3mr_sdev_destroy, .scan_finished = mpi3mr_scan_finished, .scan_start = mpi3mr_scan_start, .change_queue_depth = mpi3mr_change_queue_depth, + .eh_abort_handler = mpi3mr_eh_abort, .eh_device_reset_handler = mpi3mr_eh_dev_reset, .eh_target_reset_handler = mpi3mr_eh_target_reset, + .eh_bus_reset_handler = mpi3mr_eh_bus_reset, .eh_host_reset_handler = mpi3mr_eh_host_reset, .bios_param = mpi3mr_bios_param, .map_queues = mpi3mr_map_queues, @@ -4050,14 +5239,16 @@ static struct scsi_host_template mpi3mr_driver_template = { .no_write_same = 1, .can_queue = 1, .this_id = -1, - .sg_tablesize = MPI3MR_SG_DEPTH, + .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, /* max xfer supported is 1M (2K in 512 byte sized sectors) */ - .max_sectors = 2048, + .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, .max_segment_size = 0xffffffff, .track_queue_depth = 1, .cmd_size = sizeof(struct scmd_priv), + .shost_groups = mpi3mr_host_groups, + .sdev_groups = mpi3mr_dev_groups, }; /** @@ -4184,7 +5375,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } mrioc = shost_priv(shost); - mrioc->id = mrioc_ids++; + retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL); + if (retval < 0) + goto id_alloc_failed; + mrioc->id = (u8)retval; sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); INIT_LIST_HEAD(&mrioc->list); @@ -4199,27 +5393,56 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&mrioc->tgtdev_lock); spin_lock_init(&mrioc->watchdog_lock); spin_lock_init(&mrioc->chain_buf_lock); + spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock); + spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock); + spin_lock_init(&mrioc->sas_node_lock); + spin_lock_init(&mrioc->trigger_lock); INIT_LIST_HEAD(&mrioc->fwevt_list); INIT_LIST_HEAD(&mrioc->tgtdev_list); INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); + INIT_LIST_HEAD(&mrioc->sas_expander_list); + 
INIT_LIST_HEAD(&mrioc->hba_port_table_list); + INIT_LIST_HEAD(&mrioc->enclosure_list); mutex_init(&mrioc->reset_mutex); mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); + mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); + mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS); + mpi3mr_init_drv_cmd(&mrioc->transport_cmds, + MPI3MR_HOSTTAG_TRANSPORT_CMDS); for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); - if (pdev->revision) + for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) + mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i], + MPI3MR_HOSTTAG_EVTACKCMD_MIN + i); + + if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && + !pdev->revision) + mrioc->enable_segqueue = false; + else mrioc->enable_segqueue = true; init_waitqueue_head(&mrioc->reset_waitq); mrioc->logging_level = logging_level; mrioc->shost = shost; mrioc->pdev = pdev; + mrioc->stop_bsgs = 1; + + mrioc->max_sgl_entries = max_sgl_entries; + if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES) + mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES; + else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES) + mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; + else { + mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES; + mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES; + } /* init shost parameters */ shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; @@ -4229,6 +5452,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_channel = 0; shost->max_id = 0xFFFFFFFF; + shost->host_tagset = 1; + if (prot_mask >= 0) scsi_host_set_prot(shost, prot_mask); else { @@ -4254,10 +5479,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) else scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); - snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), - "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); mrioc->fwevt_worker_thread = alloc_ordered_workqueue( - mrioc->fwevt_worker_name, WQ_MEM_RECLAIM); + "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id); if (!mrioc->fwevt_worker_thread) { ioc_err(mrioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); @@ -4283,7 +5506,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->nr_maps = 3; shost->can_queue = mrioc->max_host_ios; - shost->sg_tablesize = MPI3MR_SG_DEPTH; + shost->sg_tablesize = mrioc->max_sgl_entries; shost->max_id = mrioc->facts.max_perids + 1; retval = scsi_add_host(shost, &pdev->dev); @@ -4294,6 +5517,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) } scsi_scan_host(shost); + mpi3mr_bsg_init(mrioc); return retval; addhost_failed: @@ -4305,9 +5529,11 @@ init_ioc_failed: resource_alloc_failed: destroy_workqueue(mrioc->fwevt_worker_thread); fwevtthread_failed: + ida_free(&mrioc_ida, mrioc->id); spin_lock(&mrioc_list_lock); list_del(&mrioc->list); spin_unlock(&mrioc_list_lock); +id_alloc_failed: scsi_host_put(shost); shost_failed: return retval; @@ -4330,6 +5556,8 @@ static void mpi3mr_remove(struct pci_dev *pdev) struct workqueue_struct *wq; unsigned long flags; struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; + struct mpi3mr_hba_port *port, *hba_port_next; + struct mpi3mr_sas_node *sas_expander, *sas_expander_next; if (!shost) return; @@ -4338,6 +5566,19 @@ static void mpi3mr_remove(struct pci_dev *pdev) while (mrioc->reset_in_progress || mrioc->is_driver_loading) ssleep(1); + if (mrioc->block_on_pci_err) { + 
mrioc->block_on_pci_err = false; + scsi_unblock_requests(shost); + mrioc->unrecoverable = 1; + } + + if (!pci_device_is_present(mrioc->pdev) || + mrioc->pci_err_recovery) { + mrioc->unrecoverable = 1; + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + } + + mpi3mr_bsg_exit(mrioc); mrioc->stop_drv_processing = 1; mpi3mr_cleanup_fwevt_list(mrioc); spin_lock_irqsave(&mrioc->fwevt_lock, flags); @@ -4346,12 +5587,16 @@ static void mpi3mr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); if (wq) destroy_workqueue(wq); - scsi_remove_host(shost); + + if (mrioc->sas_transport_enabled) + sas_remove_host(shost); + else + scsi_remove_host(shost); list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, list) { mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); - mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); + mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); mpi3mr_tgtdev_put(tgtdev); } mpi3mr_stop_watchdog(mrioc); @@ -4359,6 +5604,29 @@ static void mpi3mr_remove(struct pci_dev *pdev) mpi3mr_free_mem(mrioc); mpi3mr_cleanup_resources(mrioc); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, + &mrioc->sas_expander_list, list) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + mpi3mr_expander_node_remove(mrioc, sas_expander); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + } + list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { + ioc_info(mrioc, + "removing hba_port entry: %p port: %d from hba_port list\n", + port, port->port_id); + list_del(&port->list); + kfree(port); + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (mrioc->sas_hba.num_phys) { + kfree(mrioc->sas_hba.phy); + mrioc->sas_hba.phy = NULL; + mrioc->sas_hba.num_phys = 0; + } + + ida_free(&mrioc_ida, mrioc->id); spin_lock(&mrioc_list_lock); list_del(&mrioc->list); spin_unlock(&mrioc_list_lock); @@ -4403,22 +5671,21 @@ static void mpi3mr_shutdown(struct pci_dev *pdev) mpi3mr_cleanup_resources(mrioc); } -#ifdef CONFIG_PM /** * mpi3mr_suspend - PCI power management suspend callback - * @pdev: PCI device instance - * @state: New power state + * @dev: Device struct * * Change the power state to the given value and cleanup the IOC * by issuing MUR and shutdown notification * * Return: 0 always. 
*/ -static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused +mpi3mr_suspend(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct Scsi_Host *shost = pci_get_drvdata(pdev); struct mpi3mr_ioc *mrioc; - pci_power_t device_state; if (!shost) return 0; @@ -4432,11 +5699,8 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state) mpi3mr_stop_watchdog(mrioc); mpi3mr_cleanup_ioc(mrioc); - device_state = pci_choose_state(pdev, state); - ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", - pdev, pci_name(pdev), device_state); - pci_save_state(pdev); - pci_set_power_state(pdev, device_state); + ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", + pdev, pci_name(pdev)); mpi3mr_cleanup_resources(mrioc); return 0; @@ -4444,15 +5708,17 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state) /** * mpi3mr_resume - PCI power management resume callback - * @pdev: PCI device instance + * @dev: Device struct * * Restore the power state to D0 and reinitialize the controller * and resume I/O operations to the target devices * * Return: 0 on success, non-zero on failure */ -static int mpi3mr_resume(struct pci_dev *pdev) +static int __maybe_unused +mpi3mr_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct Scsi_Host *shost = pci_get_drvdata(pdev); struct mpi3mr_ioc *mrioc; pci_power_t device_state = pdev->current_state; @@ -4465,9 +5731,6 @@ static int mpi3mr_resume(struct pci_dev *pdev) ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", pdev, pci_name(pdev), device_state); - pci_set_power_state(pdev, PCI_D0); - pci_enable_wake(pdev, PCI_D0, 0); - pci_restore_state(pdev); mrioc->pdev = pdev; mrioc->cpu_count = num_online_cpus(); r = mpi3mr_setup_resources(mrioc); @@ -4478,40 +5741,255 @@ static int mpi3mr_resume(struct pci_dev *pdev) } mrioc->stop_drv_processing = 0; + mpi3mr_invalidate_devhandles(mrioc); + mpi3mr_free_enclosure_list(mrioc); mpi3mr_memset_buffers(mrioc); r = mpi3mr_reinit_ioc(mrioc, 1); if (r) { ioc_err(mrioc, "resuming controller failed[%d]\n", r); return r; } + ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); scsi_unblock_requests(shost); + mrioc->device_refresh_on = 0; mpi3mr_start_watchdog(mrioc); return 0; } -#endif + +/** + * mpi3mr_pcierr_error_detected - PCI error detected callback + * @pdev: PCI device instance + * @state: channel state + * + * This function is called by the PCI error recovery driver and + * based on the state passed the driver decides what actions to + * be recommended back to PCI driver. + * + * For all of the states if there is no valid mrioc or scsi host + * references in the PCI device then this function will return + * the result as disconnect. + * + * For normal state, this function will return the result as can + * recover. + * + * For frozen state, this function will block for any pending + * controller initialization or re-initialization to complete, + * stop any new interactions with the controller and return + * status as reset required. + * + * For permanent failure state, this function will mark the + * controller as unrecoverable and return status as disconnect. + * + * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or + * DISCONNECT based on the controller state. 
+ */ +static pci_ers_result_t +mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + unsigned int timeout = MPI3MR_RESET_TIMEOUT; + + dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__, + state); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + mrioc->pci_err_recovery = true; + mrioc->block_on_pci_err = true; + do { + if (mrioc->reset_in_progress || mrioc->is_driver_loading) + ssleep(1); + else + break; + } while (--timeout); + + if (!timeout) { + mrioc->pci_err_recovery = true; + mrioc->block_on_pci_err = true; + mrioc->unrecoverable = 1; + mpi3mr_stop_watchdog(mrioc); + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + return PCI_ERS_RESULT_DISCONNECT; + } + + scsi_block_requests(mrioc->shost); + mpi3mr_stop_watchdog(mrioc); + mpi3mr_cleanup_resources(mrioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + mrioc->pci_err_recovery = true; + mrioc->block_on_pci_err = true; + mrioc->unrecoverable = 1; + mpi3mr_stop_watchdog(mrioc); + mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); + return PCI_ERS_RESULT_DISCONNECT; + default: + return PCI_ERS_RESULT_DISCONNECT; + } +} + +/** + * mpi3mr_pcierr_slot_reset - Post slot reset callback + * @pdev: PCI device instance + * + * This function is called by the PCI error recovery driver + * after a slot or link reset issued by it for the recovery, the + * driver is expected to bring back the controller and + * initialize it. + * + * This function restores PCI state and reinitializes controller + * resources and the controller, this blocks for any pending + * reset to complete. + * + * Returns: PCI_ERS_RESULT_DISCONNECT on failure or + * PCI_ERS_RESULT_RECOVERED + */ +static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + unsigned int timeout = MPI3MR_RESET_TIMEOUT; + + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + do { + if (mrioc->reset_in_progress) + ssleep(1); + else + break; + } while (--timeout); + + if (!timeout) + goto out_failed; + + pci_restore_state(pdev); + + if (mpi3mr_setup_resources(mrioc)) { + ioc_err(mrioc, "setup resources failed\n"); + goto out_failed; + } + mrioc->unrecoverable = 0; + mrioc->pci_err_recovery = false; + + if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0)) + goto out_failed; + + return PCI_ERS_RESULT_RECOVERED; + +out_failed: + mrioc->unrecoverable = 1; + mrioc->block_on_pci_err = false; + scsi_unblock_requests(shost); + mpi3mr_start_watchdog(mrioc); + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * mpi3mr_pcierr_resume - PCI error recovery resume + * callback + * @pdev: PCI device instance + * + * This function enables all I/O and IOCTLs post reset issued as + * part of the PCI error recovery + * + * Return: Nothing. 
+ */ +static void mpi3mr_pcierr_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + if (mrioc->block_on_pci_err) { + mrioc->block_on_pci_err = false; + scsi_unblock_requests(shost); + mpi3mr_start_watchdog(mrioc); + } +} + +/** + * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback + * @pdev: PCI device instance + * + * This is called only if mpi3mr_pcierr_error_detected returns + * PCI_ERS_RESULT_CAN_RECOVER. + * + * Return: PCI_ERS_RESULT_DISCONNECT when the controller is + * unrecoverable or when the shost/mrioc reference cannot be + * found, else return PCI_ERS_RESULT_RECOVERED + */ +static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost; + struct mpi3mr_ioc *mrioc; + + dev_info(&pdev->dev, "%s: callback invoked\n", __func__); + + shost = pci_get_drvdata(pdev); + mrioc = shost_priv(shost); + + if (mrioc->unrecoverable) + return PCI_ERS_RESULT_DISCONNECT; + + return PCI_ERS_RESULT_RECOVERED; +} static const struct pci_device_id mpi3mr_pci_id_table[] = { { PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) }, + { + PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, + MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID) + }, + { + PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, + MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID) + }, { 0 } }; MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); +static const struct pci_error_handlers mpi3mr_err_handler = { + .error_detected = mpi3mr_pcierr_error_detected, + .mmio_enabled = mpi3mr_pcierr_mmio_enabled, + .slot_reset = mpi3mr_pcierr_slot_reset, + .resume = mpi3mr_pcierr_resume, +}; + +static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); + static struct pci_driver mpi3mr_pci_driver = { .name = MPI3MR_DRIVER_NAME, .id_table = mpi3mr_pci_id_table, .probe = mpi3mr_probe, .remove = mpi3mr_remove, .shutdown = mpi3mr_shutdown, -#ifdef CONFIG_PM - .suspend = mpi3mr_suspend, - .resume = mpi3mr_resume, -#endif + .err_handler = &mpi3mr_err_handler, + .driver.pm = &mpi3mr_pm_ops, }; +static ssize_t event_counter_show(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); +} +static DRIVER_ATTR_RO(event_counter); + static int __init mpi3mr_init(void) { int ret_val; @@ -4519,8 +5997,33 @@ static int __init mpi3mr_init(void) pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); + mpi3mr_transport_template = + sas_attach_transport(&mpi3mr_transport_functions); + if (!mpi3mr_transport_template) { + pr_err("%s failed to load due to sas transport attach failure\n", + MPI3MR_DRIVER_NAME); + return -ENODEV; + } + ret_val = pci_register_driver(&mpi3mr_pci_driver); + if (ret_val) { + pr_err("%s failed to load due to pci register driver failure\n", + MPI3MR_DRIVER_NAME); + goto err_pci_reg_fail; + } + + ret_val = driver_create_file(&mpi3mr_pci_driver.driver, + &driver_attr_event_counter); + if (ret_val) + goto err_event_counter; + + return ret_val; + +err_event_counter: + pci_unregister_driver(&mpi3mr_pci_driver); +err_pci_reg_fail: + sas_release_transport(mpi3mr_transport_template); return ret_val; } @@ -4534,7 +6037,11 @@ static void __exit mpi3mr_exit(void) pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); + driver_remove_file(&mpi3mr_pci_driver.driver, + 
&driver_attr_event_counter); pci_unregister_driver(&mpi3mr_pci_driver); + sas_release_transport(mpi3mr_transport_template); + ida_destroy(&mrioc_ida); } module_init(mpi3mr_init);
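The submission and completion paths in the diff above account for "large" I/Os with an atomic pending counter and a pair of high/low watermarks: crossing io_throttle_high sets an io_divert flag, and the flag is only cleared once the pending total drops back to io_throttle_low. The following standalone sketch illustrates that hysteresis pattern only; the identifiers, C11 atomics, and thresholds are assumptions for the example, not the driver's own symbols.

/* Minimal, standalone sketch of the high/low watermark throttling
 * pattern (illustration only, not driver code). I/O sizes are in
 * 512-byte blocks, mirroring the data_len_blks = scsi_bufflen() >> 9
 * computation in the diff.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct throttle_ctx {
	_Atomic long pend_blks;  /* pending large-I/O blocks in flight */
	long high;               /* start diverting above this total */
	long low;                /* stop diverting at or below this total */
	long min_large_io;       /* only I/Os at least this big are tracked */
	bool divert;             /* advisory flag checked at submission time */
};

static void throttle_on_submit(struct throttle_ctx *t, long blks)
{
	long pending;

	if (blks < t->min_large_io)
		return;
	/* atomic_fetch_add() returns the old value; add blks for the new total */
	pending = atomic_fetch_add(&t->pend_blks, blks) + blks;
	if (!t->divert && pending >= t->high)
		t->divert = true;
}

static void throttle_on_complete(struct throttle_ctx *t, long blks)
{
	long pending;

	if (blks < t->min_large_io)
		return;
	pending = atomic_fetch_sub(&t->pend_blks, blks) - blks;
	if (t->divert && pending <= t->low)
		t->divert = false;
}

The probe changes also sanitize the max_sgl_entries module parameter by clamping it between the default and maximum SGL counts and rounding it down to a multiple of the default. A small sketch of that arithmetic follows; the 256/2048 limits are assumed example values standing in for MPI3MR_DEFAULT_SGL_ENTRIES and MPI3MR_MAX_SGL_ENTRIES.

/* Illustration only: clamp into [default, max], then round down to a
 * multiple of the default, as the probe path in the diff does.
 */
#define EXAMPLE_DEFAULT_SGL_ENTRIES 256
#define EXAMPLE_MAX_SGL_ENTRIES     2048

static unsigned int sanitize_sgl_entries(unsigned int requested)
{
	if (requested > EXAMPLE_MAX_SGL_ENTRIES)
		return EXAMPLE_MAX_SGL_ENTRIES;
	if (requested < EXAMPLE_DEFAULT_SGL_ENTRIES)
		return EXAMPLE_DEFAULT_SGL_ENTRIES;
	return (requested / EXAMPLE_DEFAULT_SGL_ENTRIES) *
	       EXAMPLE_DEFAULT_SGL_ENTRIES;
}

/* e.g. 300 -> 256, 1000 -> 768, 4096 -> 2048, 100 -> 256 */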
