Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 302
1 file changed, 141 insertions(+), 161 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e1dfa96c2a55..b1460b16dd91 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -595,16 +595,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	/* Set up ring-0 (ELS) timer */
 	timeout = phba->fc_ratov * 2;
 	mod_timer(&vport->els_tmofunc,
-		  jiffies + msecs_to_jiffies(1000 * timeout));
+		  jiffies + secs_to_jiffies(timeout));
 	/* Set up heart beat (HB) timer */
 	mod_timer(&phba->hb_tmofunc,
-		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+		  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
 	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
 	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
 	phba->last_completion_time = jiffies;
 	/* Set up error attention (ERATT) polling timer */
 	mod_timer(&phba->eratt_poll,
-		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));
 
 	if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -1196,7 +1196,7 @@ lpfc_hb_timeout(struct timer_list *t)
 	uint32_t tmo_posted;
 	unsigned long iflag;
 
-	phba = from_timer(phba, t, hb_tmofunc);
+	phba = timer_container_of(phba, t, hb_tmofunc);
 
 	/* Check for heart beat timeout conditions */
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
@@ -1228,7 +1228,7 @@ lpfc_rrq_timeout(struct timer_list *t)
 {
 	struct lpfc_hba *phba;
 
-	phba = from_timer(phba, t, rrq_tmr);
+	phba = timer_container_of(phba, t, rrq_tmr);
 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
 		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
 		return;
@@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 	    !test_bit(FC_UNLOADING, &phba->pport->load_flag))
 		mod_timer(&phba->hb_tmofunc,
 			  jiffies +
-			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+			  secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
 
 	return;
 }
@@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
 	spin_lock_irq(&phba->pport->work_port_lock);
 	if (time_after(phba->last_completion_time +
-		       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+		       secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
 		       jiffies)) {
 		spin_unlock_irq(&phba->pport->work_port_lock);
 		if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
@@ -1907,6 +1907,9 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
 	uint32_t intr_mode;
 	LPFC_MBOXQ_t *mboxq;
 
+	/* Notifying the transport that the targets are going offline. */
+	lpfc_scsi_dev_block(phba);
+
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
 	    LPFC_SLI_INTF_IF_TYPE_2) {
 		/*
@@ -1943,6 +1946,7 @@
 
 	lpfc_offline_prep(phba, mbx_action);
 	lpfc_sli_flush_io_rings(phba);
+	lpfc_nvmels_flush_cmd(phba);
 	lpfc_offline(phba);
 	/* release interrupt for possible resource change */
 	lpfc_sli4_disable_intr(phba);
@@ -2623,27 +2627,33 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 			     "Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BMID:
-		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LP1150", "PCI-X2",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BSMB:
 		m = (typeof(m)){"LP111", "PCI-X2",
 				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR:
-		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe11000", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_SCSP:
-		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe11000", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
-		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
+		m = (typeof(m)){"LP2105", "PCIe",
+				"Obsolete, Unsupported FCoE Adapter"};
 		GE = 1;
 		break;
 	case PCI_DEVICE_ID_ZMID:
-		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe1150", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_ZSMB:
-		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe111", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LP101:
 		m = (typeof(m)){"LP101", "PCI-X",
@@ -2662,22 +2672,28 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 			     "Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT:
-		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe12000", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_MID:
-		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe1250", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SMB:
-		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe121", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_DCSP:
-		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe12002-SP", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_SCSP:
-		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe12000-SP", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_SAT_S:
-		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe12000-S", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_PROTEUS_VF:
 		m = (typeof(m)){"LPev12000", "PCIe IOV",
@@ -2693,22 +2709,25 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		break;
 	case PCI_DEVICE_ID_TIGERSHARK:
 		oneConnect = 1;
-		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+		m = (typeof(m)){"OCe10100", "PCIe",
+				"Obsolete, Unsupported FCoE Adapter"};
 		break;
 	case PCI_DEVICE_ID_TOMCAT:
 		oneConnect = 1;
-		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+		m = (typeof(m)){"OCe11100", "PCIe",
+				"Obsolete, Unsupported FCoE Adapter"};
 		break;
 	case PCI_DEVICE_ID_FALCON:
 		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
-				"EmulexSecure Fibre"};
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_BALIUS:
 		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
 				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LANCER_FC:
-		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
+		m = (typeof(m)){"LPe16000", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
 		break;
 	case PCI_DEVICE_ID_LANCER_FC_VF:
 		m = (typeof(m)){"LPe16000", "PCIe",
@@ -2716,12 +2735,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		break;
 	case PCI_DEVICE_ID_LANCER_FCOE:
 		oneConnect = 1;
-		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
+		m = (typeof(m)){"OCe15100", "PCIe",
+				"Obsolete, Unsupported FCoE Adapter"};
 		break;
 	case PCI_DEVICE_ID_LANCER_FCOE_VF:
 		oneConnect = 1;
 		m = (typeof(m)){"OCe15100", "PCIe",
-				"Obsolete, Unsupported FCoE"};
+				"Obsolete, Unsupported FCoE Adapter"};
 		break;
 	case PCI_DEVICE_ID_LANCER_G6_FC:
 		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
@@ -2735,7 +2755,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 	case PCI_DEVICE_ID_SKYHAWK:
 	case PCI_DEVICE_ID_SKYHAWK_VF:
 		oneConnect = 1;
-		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
+		m = (typeof(m)){"OCe14000", "PCIe",
+				"Obsolete, Unsupported FCoE Adapter"};
 		break;
 	default:
 		m = (typeof(m)){"Unknown", "", ""};
@@ -3036,19 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
 	lpfc_vmid_vport_cleanup(vport);
 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
				 nlp_listp) {
-		if (vport->port_type != LPFC_PHYSICAL_PORT &&
-		    ndlp->nlp_DID == Fabric_DID) {
-			/* Just free up ndlp with Fabric_DID for vports */
-			lpfc_nlp_put(ndlp);
-			continue;
-		}
-
-		if (ndlp->nlp_DID == Fabric_Cntl_DID &&
-		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
-			lpfc_nlp_put(ndlp);
-			continue;
-		}
-
 		/* Fabric Ports not in UNMAPPED state are cleaned up in the
 		 * DEVICE_RM event.
 		 */
@@ -3092,7 +3100,8 @@ lpfc_cleanup(struct lpfc_vport *vport)
 			lpfc_printf_vlog(ndlp->vport, KERN_ERR,
 					 LOG_DISCOVERY,
 					 "0282 did:x%x ndlp:x%px "
-					 "refcnt:%d xflags x%x nflag x%x\n",
+					 "refcnt:%d xflags x%x "
+					 "nflag x%lx\n",
 					 ndlp->nlp_DID, (void *)ndlp,
 					 kref_read(&ndlp->kref),
 					 ndlp->fc4_xpt_flags,
@@ -3118,8 +3127,8 @@ lpfc_cleanup(struct lpfc_vport *vport)
 void
 lpfc_stop_vport_timers(struct lpfc_vport *vport)
 {
-	del_timer_sync(&vport->els_tmofunc);
-	del_timer_sync(&vport->delayed_disc_tmo);
+	timer_delete_sync(&vport->els_tmofunc);
+	timer_delete_sync(&vport->delayed_disc_tmo);
 	lpfc_can_disctmo(vport);
 	return;
 }
@@ -3138,7 +3147,7 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
 
 	/* Now, try to stop the timer */
-	del_timer(&phba->fcf.redisc_wait);
+	timer_delete(&phba->fcf.redisc_wait);
 }
 
 /**
@@ -3300,12 +3309,12 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 	lpfc_stop_vport_timers(phba->pport);
 	cancel_delayed_work_sync(&phba->eq_delay_work);
 	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
-	del_timer_sync(&phba->sli.mbox_tmo);
-	del_timer_sync(&phba->fabric_block_timer);
-	del_timer_sync(&phba->eratt_poll);
-	del_timer_sync(&phba->hb_tmofunc);
+	timer_delete_sync(&phba->sli.mbox_tmo);
+	timer_delete_sync(&phba->fabric_block_timer);
+	timer_delete_sync(&phba->eratt_poll);
+	timer_delete_sync(&phba->hb_tmofunc);
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		del_timer_sync(&phba->rrq_tmr);
+		timer_delete_sync(&phba->rrq_tmr);
 		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
 	}
 	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
@@ -3314,7 +3323,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 	switch (phba->pci_dev_grp) {
 	case LPFC_PCI_DEV_LP:
 		/* Stop any LightPulse device specific driver timers */
-		del_timer_sync(&phba->fcp_poll_timer);
+		timer_delete_sync(&phba->fcp_poll_timer);
 		break;
 	case LPFC_PCI_DEV_OC:
 		/* Stop any OneConnect device specific driver timers */
@@ -3352,15 +3361,15 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	if (mbx_action == LPFC_MBX_NO_WAIT)
 		return;
-	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+	timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	if (phba->sli.mbox_active) {
 		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
 		/* Determine how long we might wait for the active mailbox
 		 * command to be gracefully completed by firmware.
 		 */
-		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
-				phba->sli.mbox_active) * 1000) + jiffies;
+		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+				phba->sli.mbox_active)) + jiffies;
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
@@ -3379,7 +3388,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
 }
 
 /**
- * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes.
  * @phba: pointer to lpfc hba data structure.
  *
  * Allocate RPIs for all active remote nodes. This is needed whenever
@@ -3387,7 +3396,7 @@
  * is to fixup the temporary rpi assignments.
 **/
 void
-lpfc_sli4_node_prep(struct lpfc_hba *phba)
+lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba)
 {
 	struct lpfc_nodelist *ndlp, *next_ndlp;
 	struct lpfc_vport **vports;
@@ -3397,10 +3406,10 @@
 		return;
 
 	vports = lpfc_create_vport_work_array(phba);
-	if (vports == NULL)
+	if (!vports)
 		return;
-	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+	for (i = 0; i <= phba->max_vports && vports[i]; i++) {
 		if (test_bit(FC_UNLOADING, &vports[i]->load_flag))
 			continue;
 
@@ -3409,14 +3418,20 @@
 					 nlp_listp) {
 			rpi = lpfc_sli4_alloc_rpi(phba);
 			if (rpi == LPFC_RPI_ALLOC_ERROR) {
-				/* TODO print log? */
+				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+						 LOG_NODE | LOG_DISCOVERY,
+						 "0099 RPI alloc error for "
+						 "ndlp x%px DID:x%06x "
+						 "flg:x%lx\n",
+						 ndlp, ndlp->nlp_DID,
+						 ndlp->nlp_flag);
 				continue;
 			}
 			ndlp->nlp_rpi = rpi;
 			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
 					 LOG_NODE | LOG_DISCOVERY,
 					 "0009 Assign RPI x%x to ndlp x%px "
-					 "DID:x%06x flg:x%x\n",
+					 "DID:x%06x flg:x%lx\n",
 					 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
 					 ndlp->nlp_flag);
 		}
@@ -3820,35 +3835,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
 					     &vports[i]->fc_nodes,
 					     nlp_listp) {
 
-				spin_lock_irq(&ndlp->lock);
-				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-				spin_unlock_irq(&ndlp->lock);
-
+				clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
 				if (offline || hba_pci_err) {
-					spin_lock_irq(&ndlp->lock);
-					ndlp->nlp_flag &= ~(NLP_UNREG_INP |
-							    NLP_RPI_REGISTERED);
-					spin_unlock_irq(&ndlp->lock);
-					if (phba->sli_rev == LPFC_SLI_REV4)
-						lpfc_sli_rpi_release(vports[i],
-								     ndlp);
-				} else {
-					lpfc_unreg_rpi(vports[i], ndlp);
-				}
-				/*
-				 * Whenever an SLI4 port goes offline, free the
-				 * RPI. Get a new RPI when the adapter port
-				 * comes back online.
-				 */
-				if (phba->sli_rev == LPFC_SLI_REV4) {
-					lpfc_printf_vlog(vports[i], KERN_INFO,
-						LOG_NODE | LOG_DISCOVERY,
-						"0011 Free RPI x%x on "
-						"ndlp: x%px did x%x\n",
-						ndlp->nlp_rpi, ndlp,
-						ndlp->nlp_DID);
-					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
-					ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+					clear_bit(NLP_UNREG_INP,
+						  &ndlp->nlp_flag);
+					clear_bit(NLP_RPI_REGISTERED,
+						  &ndlp->nlp_flag);
 				}
 
 				if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3862,8 +3854,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
 					 * Otherwise, let dev_loss take care of
 					 * the node.
 					 */
-					if (!(ndlp->save_flags &
-					      NLP_IN_RECOV_POST_DEV_LOSS) &&
+					if (!test_bit(NLP_IN_RECOV_POST_DEV_LOSS,
+						      &ndlp->save_flags) &&
 					    !(ndlp->fc4_xpt_flags &
 					      (NVME_XPT_REGD | SCSI_XPT_REGD)))
 						lpfc_disc_state_machine
@@ -4699,6 +4691,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	uint64_t wwn;
 	bool use_no_reset_hba = false;
 	int rc;
+	u8 if_type;
 
 	if (lpfc_no_hba_reset_cnt) {
 		if (phba->sli_rev < LPFC_SLI_REV4 &&
@@ -4773,10 +4766,24 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_id = LPFC_MAX_TARGET;
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
-	if (phba->sli_rev == LPFC_SLI_REV4)
-		shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
-	else
+
+	/* Set max_cmd_len applicable to ASIC support */
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		switch (if_type) {
+		case LPFC_SLI_INTF_IF_TYPE_2:
+			fallthrough;
+		case LPFC_SLI_INTF_IF_TYPE_6:
+			shost->max_cmd_len = LPFC_FCP_CDB_LEN_32;
+			break;
+		default:
+			shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+			break;
+		}
+	} else {
 		shost->max_cmd_len = LPFC_FCP_CDB_LEN;
+	}
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		if (!phba->cfg_fcp_mq_threshold ||
@@ -4924,14 +4931,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
 		stat = 1;
 		goto finished;
 	}
-	if (time >= msecs_to_jiffies(30 * 1000)) {
+	if (time >= secs_to_jiffies(30)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0461 Scanning longer than 30 "
 				"seconds. Continuing initialization\n");
 		stat = 1;
 		goto finished;
 	}
-	if (time >= msecs_to_jiffies(15 * 1000) &&
+	if (time >= secs_to_jiffies(15) &&
 	    phba->link_state <= LPFC_LINK_DOWN) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0465 Link down longer than 15 "
@@ -4945,7 +4952,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	if (vport->num_disc_nodes || vport->fc_prli_sent)
 		goto finished;
 	if (!atomic_read(&vport->fc_map_cnt) &&
-	    time < msecs_to_jiffies(2 * 1000))
+	    time < secs_to_jiffies(2))
 		goto finished;
 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
 		goto finished;
@@ -5128,7 +5135,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
 static void
 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
+	struct lpfc_hba *phba = timer_container_of(phba, t, fcf.redisc_wait);
 
 	/* Don't send FCF rediscovery event if timer cancelled */
 	spin_lock_irq(&phba->hbalock);
@@ -5159,7 +5166,8 @@
 static void
 lpfc_vmid_poll(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
+	struct lpfc_hba *phba = timer_container_of(phba, t,
+						   inactive_vmid_poll);
 	u32 wake_up = 0;
 
 	/* check if there is a need to issue QFPA */
@@ -5179,8 +5187,8 @@ lpfc_vmid_poll(struct timer_list *t)
 		lpfc_worker_wake_up(phba);
 
 	/* restart the timer for the next iteration */
-	mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
-							LPFC_VMID_TIMER));
+	mod_timer(&phba->inactive_vmid_poll,
+		  jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
 }
 
 /**
@@ -6909,10 +6917,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 			 * re-instantiate the Vlink using FDISC.
 			 */
 			mod_timer(&ndlp->nlp_delayfunc,
-				  jiffies + msecs_to_jiffies(1000));
-			spin_lock_irq(&ndlp->lock);
-			ndlp->nlp_flag |= NLP_DELAY_TMO;
-			spin_unlock_irq(&ndlp->lock);
+				  jiffies + secs_to_jiffies(1));
+			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
 			vport->port_state = LPFC_FDISC;
 		} else {
@@ -7917,8 +7923,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	int longs;
 	int extra;
 	uint64_t wwn;
-	u32 if_type;
-	u32 if_fam;
 
 	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
 	phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
@@ -7940,7 +7944,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Allocate all driver workqueues here */
 
 	/* The lpfc_wq workqueue for deferred irq use */
-	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!phba->wq)
 		return -ENOMEM;
 
@@ -7954,11 +7958,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
 
 	/* CMF congestion timer */
-	hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	phba->cmf_timer.function = lpfc_cmf_timer;
+	hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	/* CMF 1 minute stats collection timer */
-	hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+	hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+		      HRTIMER_MODE_REL);
 
 	/*
 	 * Control structure for handling external multi-buffer mailbox
@@ -8179,28 +8182,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	 */
 	rc = lpfc_get_sli4_parameters(phba, mboxq);
 	if (rc) {
-		if_type = bf_get(lpfc_sli_intf_if_type,
-				 &phba->sli4_hba.sli_intf);
-		if_fam = bf_get(lpfc_sli_intf_sli_family,
-				&phba->sli4_hba.sli_intf);
-		if (phba->sli4_hba.extents_in_use &&
-		    phba->sli4_hba.rpi_hdrs_in_use) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-					"2999 Unsupported SLI4 Parameters "
-					"Extents and RPI headers enabled.\n");
-			if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
-			    if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
-				mempool_free(mboxq, phba->mbox_mem_pool);
-				rc = -EIO;
-				goto out_free_bsmbx;
-			}
-		}
-		if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
-		      if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
-			mempool_free(mboxq, phba->mbox_mem_pool);
-			rc = -EIO;
-			goto out_free_bsmbx;
-		}
+		lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
+			     "2999 Could not get SLI4 parameters\n");
+		rc = -EIO;
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		goto out_free_bsmbx;
 	}
 
 	/*
@@ -8301,10 +8287,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 			phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
 			phba->cfg_nvme_seg_cnt);
 
-	if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
-		i = phba->cfg_sg_dma_buf_size;
-	else
-		i = SLI4_PAGE_SIZE;
+	i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
 
 	phba->lpfc_sg_dma_buf_pool =
 		dma_pool_create("lpfc_sg_dma_buf_pool",
@@ -9093,9 +9076,9 @@ lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
 		vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
 	}
 
-	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-			"6077 Setup FDMI mask: hba x%x port x%x\n",
-			vport->fdmi_hba_mask, vport->fdmi_port_mask);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "6077 Setup FDMI mask: hba x%x port x%x\n",
+			 vport->fdmi_hba_mask, vport->fdmi_port_mask);
 }
 
 /**
@@ -10436,6 +10419,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	struct lpfc_vector_map_info *cpup;
 	struct lpfc_vector_map_info *eqcpup;
 	struct lpfc_eq_intr_info *eqi;
+	u32 wqesize;
 
 	/*
 	 * Create HBA Record arrays.
@@ -10655,9 +10639,15 @@
 	 * Create ELS Work Queues
 	 */
 
-	/* Create slow-path ELS Work Queue */
+	/*
+	 * Create slow-path ELS Work Queue.
+	 * Increase the ELS WQ size when WQEs contain an embedded cdb
+	 */
+	wqesize = (phba->fcp_embed_io) ?
+		  LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+
 	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
-				      phba->sli4_hba.wq_esize,
+				      wqesize,
 				      phba->sli4_hba.wq_ecount, cpu);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -11104,14 +11094,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	phba->sli4_hba.fw_func_mode =
 			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
-	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
-	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
 	phba->sli4_hba.physical_port =
 			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
-			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
-			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+			"3251 QUERY_FW_CFG: func_mode:x%x\n",
+			phba->sli4_hba.fw_func_mode);
 
 	mempool_free(mboxq, phba->mbox_mem_pool);
 
@@ -12760,7 +12747,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
 	 * timer. Wait for the poll timer to retire.
 	 */
 	synchronize_rcu();
-	del_timer_sync(&phba->cpuhp_poll_timer);
+	timer_delete_sync(&phba->cpuhp_poll_timer);
 }
 
 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
@@ -12871,7 +12858,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 
 	if (offline) {
 		/* Find next online CPU on original mask */
-		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+		cpu_next = cpumask_next_wrap(cpu, orig_mask);
 		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
 
 		/* Found a valid CPU */
@@ -13168,6 +13155,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 	eqhdl = lpfc_get_eq_hdl(0);
 	rc = pci_irq_vector(phba->pcidev, 0);
 	if (rc < 0) {
+		free_irq(phba->pcidev->irq, phba);
 		pci_free_irq_vectors(phba->pcidev);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 			    "0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13248,6 +13236,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 		eqhdl = lpfc_get_eq_hdl(0);
 		retval = pci_irq_vector(phba->pcidev, 0);
 		if (retval < 0) {
+			free_irq(phba->pcidev->irq, phba);
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0502 INTR pci_irq_vec failed (%d)\n",
 				 retval);
@@ -13496,6 +13485,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
 	/* Disable FW logging to host memory */
 	lpfc_ras_stop_fwlog(phba);
 
+	lpfc_sli4_queue_unset(phba);
+
 	/* Reset SLI4 HBA FCoE function */
 	lpfc_pci_function_reset(phba);
 
@@ -13861,12 +13852,7 @@ fcponly:
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
 
-	rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
-	if (unlikely(rc)) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"6400 Can't set dma maximum segment size\n");
-		return rc;
-	}
+	dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
 
 	/*
 	 * Check whether the adapter supports an embedded copy of the
@@ -14375,7 +14361,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 * as desired.
 *
 * Return codes
- * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
+ * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
@@ -14442,12 +14428,6 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
 
 	pci_restore_state(pdev);
 
-	/*
-	 * As the new kernel behavior of pci_restore_state() API call clears
-	 * device saved_state flag, need to save the restored state again.
-	 */
-	pci_save_state(pdev);
-
 	if (pdev->is_busmaster)
 		pci_set_master(pdev);
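Note on the recurring timer conversions in the hunks above: open-coded msecs_to_jiffies(1000 * n) becomes secs_to_jiffies(n), and the legacy from_timer() / del_timer_sync() / hrtimer_init() names are replaced by timer_container_of() / timer_delete_sync() / hrtimer_setup(). The stand-alone module below is a minimal sketch of that pattern against a recent kernel; it is illustrative only and not part of the patch, and the module name, context structure, and 2-second period are arbitrary.

// SPDX-License-Identifier: GPL-2.0
/* Minimal illustration of the timer API pattern used in the patch above:
 * secs_to_jiffies() for whole-second timeouts, timer_container_of() in the
 * callback, and timer_delete_sync() on teardown.  Not part of lpfc.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct timer_list poll_timer;
	unsigned long fired;
};

static struct demo_ctx demo;

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the containing structure from the timer_list pointer
	 * (replacement for the older from_timer() helper).
	 */
	struct demo_ctx *ctx = timer_container_of(ctx, t, poll_timer);

	ctx->fired++;
	/* Re-arm for another 2 seconds (was: msecs_to_jiffies(1000 * 2)) */
	mod_timer(&ctx->poll_timer, jiffies + secs_to_jiffies(2));
}

static int __init demo_init(void)
{
	timer_setup(&demo.poll_timer, demo_timer_fn, 0);
	mod_timer(&demo.poll_timer, jiffies + secs_to_jiffies(2));
	return 0;
}

static void __exit demo_exit(void)
{
	/* Synchronously stop the timer (was: del_timer_sync()) */
	timer_delete_sync(&demo.poll_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("timer API conversion illustration");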
