Diffstat (limited to 'drivers/scsi/lpfc/lpfc_hbadisc.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  3416
1 file changed, 2155 insertions(+), 1261 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 60d6ca2f68c2..bb803f32bc1b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -25,11 +27,14 @@
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
+#include <linux/lockdep.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -37,8 +42,9 @@
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -65,36 +71,83 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
+static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
-void
-lpfc_terminate_rport_io(struct fc_rport *rport)
+static int
+lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
+{
+ if (ndlp->nlp_fc4_type ||
+ ndlp->nlp_type & NLP_FABRIC)
+ return 1;
+ return 0;
+}
+/* The source of a terminate rport I/O is either a dev_loss_tmo
+ * event or a call to fc_remove_host. While the rport should be
+ * valid during these downcalls, the transport can call twice
+ * in a single event. This routine provides some protection
+ * as the NDLP isn't really free, just released to the pool.
+ */
+static int
+lpfc_rport_invalid(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
- struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+
+ if (!rport) {
+ pr_err("**** %s: NULL rport, exit.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) {
+ pr_info("**** %s: devloss_callbk_done rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
rdata = rport->dd_data;
+ if (!rdata) {
+ pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
+
ndlp = rdata->pnode;
+ if (!rdata->pnode) {
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
+ return -EINVAL;
+ }
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
- printk(KERN_ERR "Cannot find remote node"
- " to terminate I/O Data x%x\n",
- rport->port_id);
- return;
+ if (!ndlp->vport) {
+ pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
+ "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
+ rport->scsi_target_id);
+ return -EINVAL;
}
+ return 0;
+}
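
For illustration only, a minimal userspace C sketch of the chain-validation pattern lpfc_rport_invalid() applies to rport -> dd_data -> pnode -> vport; every type and name below is hypothetical, not part of this patch:

#include <errno.h>
#include <stdio.h>

struct vport { int id; };
struct node  { struct vport *vport; };
struct rdata { struct node  *pnode; };
struct rport { struct rdata *dd_data; };

/* Sketch: refuse to touch the node unless every link is populated. */
static int rport_invalid(const struct rport *rp)
{
	if (!rp || !rp->dd_data || !rp->dd_data->pnode ||
	    !rp->dd_data->pnode->vport)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct rport stale = { 0 };	/* dd_data already torn down */

	if (rport_invalid(&stale))
		fprintf(stderr, "skipping stale rport\n");
	return 0;
}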
+
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport;
- phba = ndlp->phba;
+ if (lpfc_rport_invalid(rport))
+ return;
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
- "rport terminate: sid:x%x did:x%x flg:x%x",
- ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+ rdata = rport->dd_data;
+ ndlp = rdata->pnode;
+ vport = ndlp->vport;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%lx",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
- if (ndlp->nlp_sid != NLP_NO_SID) {
- lpfc_sli_abort_iocb(ndlp->vport,
- &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
+ if (ndlp->nlp_sid != NLP_NO_SID)
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
/*
@@ -103,77 +156,283 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist * ndlp;
+ struct lpfc_nodelist *ndlp;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
- int put_node;
- int put_rport;
+ unsigned long iflags;
+ bool drop_initial_node_ref = false;
- rdata = rport->dd_data;
- ndlp = rdata->pnode;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
+ if (!ndlp)
return;
vport = ndlp->vport;
phba = vport->phba;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport devlosscb: sid:x%x did:x%x flg:x%x",
+ "rport devlosscb: sid:x%x did:x%x flg:x%lx",
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
-
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
- */
- if (vport->load_flag & FC_UNLOADING) {
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
+ "3181 dev_loss_callbk x%06x, rport x%px flg x%lx "
+ "load_flag x%lx refcnt %u state %d xpt x%x\n",
+ ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
+
+ /* Don't schedule a worker thread event if the vport is going down. */
+ if (test_bit(FC_UNLOADING, &vport->load_flag) ||
+ (phba->sli_rev == LPFC_SLI_REV4 &&
+ !test_bit(HBA_SETUP, &phba->hba_flag))) {
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- if (put_node)
+
+ /* Only 1 thread can drop the initial node reference.
+ * If not registered for NVME and NLP_DROPPED flag is
+ * clear, remove the initial reference.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ drop_initial_node_ref = true;
+
+ /* The scsi_transport is done with the rport so lpfc cannot
+ * call to unregister.
+ */
+ if (ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+
+ /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
+ * unregister calls were made to the scsi and nvme
+ * transports and refcnt was already decremented. Clear
+ * the NLP_XPT_REGD flag only if the NVME nrport is
+ * confirmed unregistered.
+ */
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ /* Release scsi transport reference */
+ lpfc_nlp_put(ndlp);
+ } else {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ }
+ } else {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ }
+
+ if (drop_initial_node_ref)
lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
return;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- if (ndlp->nlp_type & NLP_FABRIC) {
+ /* Ignore callback for a mismatched (stale) rport */
+ if (ndlp->rport != rport) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+ "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+ "fc rport x%px node rport x%px state x%x "
+ "refcnt %u\n",
+ ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+ ndlp->nlp_state, kref_read(&ndlp->kref));
+ return;
+ }
- /* If the WWPN of the rport and ndlp don't match, ignore it */
- if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
- put_device(&rport->dev);
+ if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6789 rport name %llx != node port name %llx",
+ rport->port_name,
+ wwn_to_u64(ndlp->nlp_portname.u.wwn));
+
+ evtp = &ndlp->dev_loss_evt;
+
+ if (!list_empty(&evtp->evt_listp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6790 rport name %llx dev_loss_evt pending\n",
+ rport->port_name);
+ return;
+ }
+
+ set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* If there is a PLOGI in progress, and we are in a
+ * NLP_NPR_2B_DISC state, don't turn off the flag.
+ */
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
+ clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+
+ /*
+ * The backend does not expect any more calls associated with this
+ * rport. Remove the association between rport and ndlp.
+ */
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ ndlp->rport = NULL;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_worker_wake_up(phba);
return;
}
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%lx load_flag x%lx refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ /* Node is in dev loss. No further transaction. */
+ clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
}
+}
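
The hold-across-handoff rule above (lpfc_nlp_get() before the event is queued, lpfc_nlp_put() once the worker consumes it) can be sketched in plain C11; the names are assumptions, not driver API:

#include <stdatomic.h>
#include <stddef.h>

struct node { atomic_int refcnt; };
struct evt  { struct node *arg1; };

static struct node *node_get(struct node *n)
{
	atomic_fetch_add(&n->refcnt, 1);
	return n;
}

static void node_put(struct node *n)
{
	/* a real implementation frees the node when this hits zero */
	atomic_fetch_sub(&n->refcnt, 1);
}

/* Queue side: hold the node across the handoff to the worker. */
static void queue_dev_loss(struct evt *e, struct node *n)
{
	e->arg1 = node_get(n);
}

/* Worker side: drop the reference once the event is handled. */
static void worker_done(struct evt *e)
{
	node_put(e->arg1);
	e->arg1 = NULL;
}

int main(void)
{
	struct node n = { .refcnt = 1 };
	struct evt e;

	queue_dev_loss(&e, &n);	/* refcnt 2 while the event is queued */
	worker_done(&e);	/* back to 1 after handling */
	return 0;
}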
- evtp = &ndlp->dev_loss_evt;
+/**
+ * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
+ * @vport: Pointer to vport context object.
+ *
+ * This function checks for idle VMID entries related to a particular vport. If
+ * found unused/idle, free them accordingly.
+ **/
+static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
+{
+ u16 keep;
+ u32 difftime = 0, r, bucket;
+ u64 *lta;
+ int cpu;
+ struct lpfc_vmid *vmp;
+
+ write_lock(&vport->vmid_lock);
+
+ if (!vport->cur_vmid_cnt)
+ goto out;
+
+ /* iterate through the table */
+ hash_for_each(vport->hash_table, bucket, vmp, hnode) {
+ keep = 0;
+ if (vmp->flag & LPFC_VMID_REGISTERED) {
+ /* check if the particular VMID is in use */
+ /* for all available per cpu variable */
+ for_each_possible_cpu(cpu) {
+ /* if last access time is less than timeout */
+ lta = per_cpu_ptr(vmp->last_io_time, cpu);
+ if (!lta)
+ continue;
+ difftime = (jiffies) - (*lta);
+ if ((vport->vmid_inactivity_timeout *
+ JIFFIES_PER_HR) > difftime) {
+ keep = 1;
+ break;
+ }
+ }
+
+ /* if none of the cpus have been used by the vm, */
+ /* remove the entry if already registered */
+ if (!keep) {
+ /* mark the entry for deregistration */
+ vmp->flag = LPFC_VMID_DE_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ if (vport->vmid_priority_tagging)
+ r = lpfc_vmid_uvem(vport, vmp, false);
+ else
+ r = lpfc_vmid_cmd(vport,
+ SLI_CTAS_DAPP_IDENT,
+ vmp);
+
+ /* decrement number of active vms and mark */
+ /* entry in slot as free */
+ write_lock(&vport->vmid_lock);
+ if (!r) {
+ struct lpfc_vmid *ht = vmp;
+
+ vport->cur_vmid_cnt--;
+ ht->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(ht->last_io_time);
+ ht->last_io_time = NULL;
+ hash_del(&ht->hnode);
+ }
+ }
+ }
+ }
+ out:
+ write_unlock(&vport->vmid_lock);
+}
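
The aging rule in lpfc_check_inactive_vmid_one() — keep a VMID if any CPU saw I/O inside the inactivity window, otherwise deregister it — reduces to a small predicate. A userspace sketch with assumed names, using wall-clock seconds in place of jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NR_CPUS 8

struct vmid_entry {
	/* stands in for the driver's per-cpu last_io_time pointer */
	time_t last_io_time[NR_CPUS];
};

/* Keep the entry if any CPU's last access is inside the window. */
static bool vmid_active(const struct vmid_entry *e, time_t now,
			time_t timeout_secs)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (now - e->last_io_time[cpu] < timeout_secs)
			return true;
	return false;	/* idle on every CPU: deregister and free */
}

int main(void)
{
	struct vmid_entry e = { 0 };	/* no I/O ever recorded */

	printf("keep entry: %d\n", vmid_active(&e, time(NULL), 3600));
	return 0;
}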
+
+/**
+ * lpfc_check_inactive_vmid - VMID inactivity checker
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the worker thread to determine if an entry in
+ * the VMID table can be released since there was no I/O activity seen from that
+ * particular VM for the specified time. When this happens, the entry in the
+ * table is released and also the resources on the switch cleared.
+ **/
+
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
- if (!list_empty(&evtp->evt_listp))
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
return;
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
- spin_lock_irq(&phba->hbalock);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
- */
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
+ lpfc_check_inactive_vmid_one(vport);
}
- spin_unlock_irq(&phba->hbalock);
+ lpfc_destroy_vport_work_array(phba, vports);
+}
- return;
+/**
+ * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to remote node object.
+ *
+ * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of
+ * node during dev_loss_tmo processing, then this function restores the nlp_put
+ * kref decrement from lpfc_dev_loss_tmo_handler.
+ **/
+void
+lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
+ clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ lpfc_nlp_get(ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
+ "8438 Devloss timeout reversed on DID x%x "
+ "refcnt %d ndlp %p flag x%lx "
+ "port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
+ ndlp->nlp_flag, vport->port_state);
+ }
}
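
The reversal above hinges on an atomic test-and-clear so that exactly one thread re-takes the reference dropped at devloss time. A minimal sketch of the idiom in C11 atomics, with assumed names:

#include <stdatomic.h>
#include <stdio.h>

struct node {
	atomic_int  refcnt;
	atomic_bool in_recov_post_dev_loss;
};

static void check_post_devloss(struct node *n)
{
	/* atomic_exchange(..., false) == test_and_clear for a flag */
	if (atomic_exchange(&n->in_recov_post_dev_loss, false))
		atomic_fetch_add(&n->refcnt, 1);	/* restore the ref */
}

int main(void)
{
	struct node n = { .refcnt = 0 };

	atomic_store(&n.in_recov_post_dev_loss, true);
	check_post_devloss(&n);	/* flag cleared, refcnt back to 1 */
	printf("refcnt=%d\n", atomic_load(&n.refcnt));
	return 0;
}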
/**
@@ -189,59 +448,32 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
- struct lpfc_rport_data *rdata;
- struct fc_rport *rport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
uint8_t *name;
- int put_node;
- int put_rport;
int warn_on = 0;
int fcf_inuse = 0;
+ bool recovering = false;
+ struct fc_vport *fc_vport = NULL;
+ unsigned long iflags;
- rport = ndlp->rport;
-
- if (!rport)
- return fcf_inuse;
-
- rdata = rport->dd_data;
- name = (uint8_t *) &ndlp->nlp_portname;
vport = ndlp->vport;
- phba = vport->phba;
+ name = (uint8_t *)&ndlp->nlp_portname;
+ phba = vport->phba;
if (phba->sli_rev == LPFC_SLI_REV4)
fcf_inuse = lpfc_fcf_inuse(phba);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport devlosstmo:did:x%x type:x%x id:x%x",
- ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
- ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
-
- /* Don't defer this if we are in the process of deleting the vport
- * or unloading the driver. The unload will cleanup the node
- * appropriately we just need to cleanup the ndlp rport info here.
- */
- if (vport->load_flag & FC_UNLOADING) {
- if (ndlp->nlp_sid != NLP_NO_SID) {
- /* flush the target */
- lpfc_sli_abort_iocb(vport,
- &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
- }
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
- return fcf_inuse;
- }
+ "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
+ /* If the driver is recovering the rport, ignore devloss. */
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0284 Devloss timeout Ignored on "
@@ -250,69 +482,163 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID);
+
+ clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
return fcf_inuse;
}
+ /* Fabric nodes are done. */
if (ndlp->nlp_type & NLP_FABRIC) {
- /* We will clean up these Nodes in linkup */
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
+ spin_lock_irqsave(&ndlp->lock, iflags);
+
+ /* The driver has to account for a race between any fabric
+ * node that's in recovery when dev_loss_tmo expires. When this
+ * happens, the driver has to allow node recovery.
+ */
+ switch (ndlp->nlp_DID) {
+ case Fabric_DID:
+ fc_vport = vport->fc_vport;
+ if (fc_vport) {
+ /* NPIV path. */
+ if (fc_vport->vport_state ==
+ FC_VPORT_INITIALIZING)
+ recovering = true;
+ } else {
+ /* Physical port path. */
+ if (test_bit(HBA_FLOGI_OUTSTANDING,
+ &phba->hba_flag))
+ recovering = true;
+ }
+ break;
+ case Fabric_Cntl_DID:
+ if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
+ recovering = true;
+ break;
+ case FDMI_DID:
+ fallthrough;
+ case NameServer_DID:
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
+ recovering = true;
+ break;
+ default:
+ /* Ensure the nlp_DID at least has the correct prefix.
+ * The fabric domain controller's last three nibbles
+ * vary so we handle it in the default case.
+ */
+ if (ndlp->nlp_DID & Fabric_DID_MASK) {
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
+ recovering = true;
+ }
+ break;
+ }
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ /* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing
+ * the following lpfc_nlp_put is necessary after fabric node is
+ * recovered.
+ */
+ clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
+ if (recovering) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
+ "8436 Devloss timeout marked on "
+ "DID x%x refcnt %d ndlp %p "
+ "flag x%lx port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp, ndlp->nlp_flag,
+ vport->port_state);
+ set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags);
+ return fcf_inuse;
+ } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ /* Fabric node fully recovered before this dev_loss_tmo
+ * queue work is processed. Thus, ignore the
+ * dev_loss_tmo event.
+ */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
+ "8437 Devloss timeout ignored on "
+ "DID x%x refcnt %d ndlp %p "
+ "flag x%lx port_state = x%x\n",
+ ndlp->nlp_DID, kref_read(&ndlp->kref),
+ ndlp, ndlp->nlp_flag,
+ vport->port_state);
+ return fcf_inuse;
+ }
+
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
return fcf_inuse;
}
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
- /* flush the target */
- lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
if (warn_on) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%lx x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%lx x%x x%x\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
}
+ clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag);
- put_node = rdata->pnode != NULL;
- put_rport = ndlp->rport != NULL;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- if (put_node)
- lpfc_nlp_put(ndlp);
- if (put_rport)
- put_device(&rport->dev);
-
- if (!(vport->load_flag & FC_UNLOADING) &&
- !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
- (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+ /* If we are devloss, but we are in the process of rediscovering the
+ * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
+ */
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
+ return fcf_inuse;
+ }
+
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
return fcf_inuse;
}
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
+ if (!lpfc_issue_els_qfpa(vport))
+ vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
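
Both VMID walkers above share one convention for the vport work array: slot 0 may be NULL, meaning "fall back to the physical port", and the first NULL after slot 0 ends the populated range. A sketch with assumed types:

#include <stddef.h>
#include <stdio.h>

struct vport { int id; };

/* Walk slots 0..max_vports with the driver's NULL conventions. */
static void for_each_vport(struct vport **vports, int max_vports,
			   struct vport *physical_port,
			   void (*fn)(struct vport *))
{
	for (int i = 0; i <= max_vports; i++) {
		struct vport *vport =
			(!vports[i] && i == 0) ? physical_port : vports[i];

		if (!vport)
			break;	/* end of populated slots */
		fn(vport);
	}
}

static void show(struct vport *v) { printf("vport %d\n", v->id); }

int main(void)
{
	struct vport phys = { 0 }, npiv = { 1 };
	struct vport *arr[3] = { NULL, &npiv, NULL };

	for_each_vport(arr, 2, &phys, show);	/* prints ids 0 then 1 */
	return 0;
}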
+
/**
* lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
* @phba: Pointer to hba context object.
@@ -341,14 +667,15 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
if (!fcf_inuse)
return;
- if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+ if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
+ !lpfc_fcf_inuse(phba)) {
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
- if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ if (test_and_set_bit(HBA_DEVLOSS_TMO,
+ &phba->hba_flag)) {
spin_unlock_irq(&phba->hbalock);
return;
}
- phba->hba_flag |= HBA_DEVLOSS_TMO;
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2847 Last remote node (x%x) using "
"FCF devloss tmo\n", nlp_did);
@@ -360,8 +687,9 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
"in progress\n");
return;
}
- if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
+ if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) &&
+ !test_bit(FCF_RR_INPROG, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2869 Devloss tmo to idle FIP engine, "
"unreg in-use FCF and rescan.\n");
@@ -369,11 +697,10 @@ lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
lpfc_unregister_fcf_rescan(phba);
return;
}
- spin_unlock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_TS_INPROG)
+ if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2870 FCF table scan in progress\n");
- if (phba->hba_flag & FCF_RR_INPROG)
+ if (test_bit(FCF_RR_INPROG, &phba->hba_flag))
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2871 FLOGI roundrobin FCF failover "
"in progress\n");
@@ -490,11 +817,12 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
return;
}
- fc_host_post_vendor_event(shost,
- fc_get_event_number(),
- evt_data_size,
- evt_data,
- LPFC_NL_VENDOR_ID);
+ if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ evt_data_size,
+ evt_data,
+ LPFC_NL_VENDOR_ID);
lpfc_free_fast_evt(phba, fast_evt_data);
return;
@@ -508,18 +836,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -539,6 +871,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
fcf_inuse,
nlp_did);
break;
+ case LPFC_EVT_RECOVER_PORT:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
+ /* decrement the node reference count held for
+ * this queued work
+ */
+ lpfc_nlp_put(ndlp);
+ break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
*(int *) (evtp->evt_arg1) = lpfc_online(phba);
@@ -583,7 +926,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
free_evt = 0;
break;
case LPFC_EVT_RESET_HBA:
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_reset_hba(phba);
break;
}
@@ -603,42 +946,63 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
- if (ha_copy & HA_ERATT)
+ if (ha_copy & HA_ERATT) {
/* Handle the error attention event */
lpfc_handle_eratt(phba);
+ if (phba->fw_dump_cmpl) {
+ complete(phba->fw_dump_cmpl);
+ phba->fw_dump_cmpl = NULL;
+ }
+ }
+
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+ /* Handle VMID Events */
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_VMID_ISSUE_QFPA) {
+ lpfc_check_vmid_qfpa_issue(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_INACTIVE_VMID) {
+ lpfc_check_inactive_vmid(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_INACTIVE_VMID;
+ }
+ }
+
/* Process SLI4 events */
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
- if (phba->hba_flag & HBA_RRQ_ACTIVE)
+ if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag))
lpfc_handle_rrq_active(phba);
- if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
- lpfc_sli4_fcp_xri_abort_event_proc(phba);
- if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+ if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag))
lpfc_sli4_els_xri_abort_event_proc(phba);
- if (phba->hba_flag & ASYNC_EVENT)
+ if (test_bit(ASYNC_EVENT, &phba->hba_flag))
lpfc_sli4_async_event_proc(phba);
- if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
- spin_unlock_irq(&phba->hbalock);
+ if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER,
+ &phba->hba_flag))
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- }
if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
lpfc_sli4_fcf_redisc_event_proc(phba);
}
@@ -660,6 +1024,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
@@ -670,37 +1036,37 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_mbox_timeout_handler(phba);
if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
lpfc_unblock_fabric_iocbs(phba);
- if (work_port_events & WORKER_FDMI_TMO)
- lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
- if (work_port_events & WORKER_RAMP_UP_QUEUE)
- lpfc_ramp_up_queue_handler(phba);
if (work_port_events & WORKER_DELAYED_DISC_TMO)
lpfc_delayed_disc_timeout_handler(vport);
}
lpfc_destroy_vport_work_array(phba, vports);
- pring = &phba->sli.ring[LPFC_ELS_RING];
+ pring = lpfc_phba_elsring(phba);
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
- if ((status & HA_RXMASK) ||
- (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
- (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+ if (pring && (status & HA_RXMASK ||
+ pring->flag & LPFC_DEFERRED_RING_EVENT ||
+ test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) {
if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
- /* Set the lpfc data pending flag */
- set_bit(LPFC_DATA_READY, &phba->data_flags);
+ /* Preserve legacy behavior. */
+ if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- if (phba->link_state >= LPFC_LINK_UP) {
+ /* Driver could have abort request completed in queue
+ * when link goes down. Allow for this transition.
+ */
+ if (phba->link_state >= LPFC_LINK_DOWN ||
+ phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
(status &
HA_RXMASK));
}
}
- if ((phba->sli_rev == LPFC_SLI_REV4) &
- (!list_empty(&pring->txq)))
+ if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
@@ -733,7 +1099,7 @@ lpfc_do_work(void *p)
struct lpfc_hba *phba = p;
int rc;
- set_user_nice(current, -20);
+ set_user_nice(current, MIN_NICE);
current->flags |= PF_NOFREEZE;
phba->data_flags = 0;
@@ -745,7 +1111,7 @@ lpfc_do_work(void *p)
|| kthread_should_stop()));
/* Signal wakeup shall terminate the worker thread */
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0433 Wakeup on signal: rc=x%x\n", rc);
break;
}
@@ -795,37 +1161,37 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp, *next_ndlp;
- int rc;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- continue;
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
- ((vport->port_type == LPFC_NPIV_PORT) &&
- (ndlp->nlp_DID == NameServer_DID)))
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ ((ndlp->nlp_DID == NameServer_DID) ||
+ (ndlp->nlp_DID == FDMI_DID) ||
+ (ndlp->nlp_DID == Fabric_Cntl_DID))))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
if ((phba->sli_rev < LPFC_SLI_REV4) &&
(!remove && ndlp->nlp_type & NLP_FABRIC))
continue;
- rc = lpfc_disc_state_machine(vport, ndlp, NULL,
- remove
- ? NLP_EVT_DEVICE_RM
- : NLP_EVT_DEVICE_RECOVERY);
+
+ /* Notify transport of connectivity loss to trigger cleanup. */
+ if (phba->nvmet_support &&
+ ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nvmet_invalidate_host(phba, ndlp);
+
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ remove
+ ? NLP_EVT_DEVICE_RM
+ : NLP_EVT_DEVICE_RECOVERY);
}
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
}
}
@@ -852,9 +1218,12 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ fc_host_post_event(shost, fc_get_event_number(),
+ FCH_EVT_LINKDOWN, 0);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Link Down: state:x%x rtry:x%x flg:x%x",
@@ -863,44 +1232,90 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
/* Stop delayed Nport discovery */
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_DISC_DELAYED;
- spin_unlock_irq(shost->host_lock);
- del_timer_sync(&vport->delayed_disc_tmo);
+ clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
+ timer_delete_sync(&vport->delayed_disc_tmo);
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+ /* Assume success on link up */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
+
+ /* Decrement the held ndlp if there is a deferred flogi acc */
+ if (phba->defer_flogi_acc.flag) {
+ if (phba->defer_flogi_acc.ndlp) {
+ lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
+ phba->defer_flogi_acc.ndlp = NULL;
+ }
+ }
+ phba->defer_flogi_acc.flag = false;
+
+ /* reinitialize initial HBA flag */
+ clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
+ clear_bit(HBA_RHBA_CMPL, &phba->hba_flag);
+
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN;
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_LBIT;
- spin_unlock_irq(shost->host_lock);
+ if (phba->sli4_hba.conf_trunk) {
+ phba->trunk_link.link0.state = 0;
+ phba->trunk_link.link1.state = 0;
+ phba->trunk_link.link2.state = 0;
+ phba->trunk_link.link3.state = 0;
+ phba->trunk_link.phy_lnk_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
+ phba->sli4_hba.link_state.logical_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
+ }
+ clear_bit(FC_LBIT, &phba->pport->fc_flag);
}
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
+ if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(vports[i]);
+
+ vports[i]->fc_myDID = 0;
+
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (phba->nvmet_support)
+ lpfc_nvmet_update_targetport(phba);
+ else
+ lpfc_nvme_update_localport(vports[i]);
+ }
}
+ }
lpfc_destroy_vport_work_array(phba, vports);
- /* Clean up any firmware default rpi's */
+
+ /* Clean up any SLI3 firmware default rpi's */
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
+ goto skip_unreg_did;
+
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
@@ -912,9 +1327,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
}
+ skip_unreg_did:
/* Setup myDID for link up if we are in pt2pt mode */
- if (phba->pport->fc_flag & FC_PT2PT) {
- phba->pport->fc_myDID = 0;
+ if (test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_config_link(phba, mb);
@@ -925,11 +1340,12 @@ lpfc_linkdown(struct lpfc_hba *phba)
mempool_free(mb, phba->mbox_mem_pool);
}
}
+ clear_bit(FC_PT2PT, &phba->pport->fc_flag);
+ clear_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag);
spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
}
-
return 0;
}
@@ -939,8 +1355,8 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -950,7 +1366,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
if (ndlp->nlp_DID != Fabric_DID)
lpfc_unreg_rpi(vport, ndlp);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
/* Fail outstanding IO now since device is
* marked for PLOGI.
*/
@@ -965,7 +1381,7 @@ lpfc_linkup_port(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- if ((vport->load_flag & FC_UNLOADING) != 0)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -977,18 +1393,27 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+ if (phba->defer_flogi_acc.flag) {
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
+ } else {
+ clear_bit(FC_PT2PT, &vport->fc_flag);
+ clear_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
+ clear_bit(FC_NLP_MORE, &vport->fc_flag);
+ clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
+ }
+ set_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
- vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
+ lpfc_setup_fdmi_mask(vport);
- if (vport->fc_flag & FC_LBIT)
- lpfc_linkup_cleanup_nodes(vport);
-
+ lpfc_linkup_cleanup_nodes(vport);
}
static int
@@ -996,13 +1421,13 @@ lpfc_linkup(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
int i;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
- lpfc_cleanup_wt_rrqs(phba);
phba->link_state = LPFC_LINK_UP;
/* Unblock fabric iocbs if they are blocked */
clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
- del_timer_sync(&phba->fabric_block_timer);
+ timer_delete_sync(&phba->fabric_block_timer);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -1010,6 +1435,14 @@ lpfc_linkup(struct lpfc_hba *phba)
lpfc_linkup_port(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
+ /* Clear the pport flogi counter in case the link down was
+ * absorbed without an ACQE. No lock here - in worker thread
+ * and discovery is synchronized.
+ */
+ spin_lock_irq(shost->host_lock);
+ phba->pport->rcv_flogi_cnt = 0;
+ spin_unlock_irq(shost->host_lock);
+
return 0;
}
@@ -1017,26 +1450,24 @@ lpfc_linkup(struct lpfc_hba *phba)
* This routine handles processing a CLEAR_LA mailbox
* command upon completion. It is set up in the LPFC_MBOXQ
* as the completion routine when the command is
- * handed off to the SLI layer.
+ * handed off to the SLI layer. SLI3 only.
*/
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t control;
/* Since we don't do discovery right now, turn these off here */
- psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
mb->mbxStatus, vport->port_state);
@@ -1063,9 +1494,7 @@ out:
"0225 Device Discovery completes\n");
mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_ABORT_DISCOVERY;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
lpfc_can_disctmo(vport);
@@ -1082,26 +1511,28 @@ out:
return;
}
-
-static void
+void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
-
- if (pmb->u.mb.mbxStatus)
- goto out;
+ LPFC_MBOXQ_t *sparam_mb;
+ u16 status = pmb->u.mb.mbxStatus;
+ int rc;
mempool_free(pmb, phba->mbox_mem_pool);
+ if (status)
+ goto out;
+
/* don't perform discovery for SLI4 loopback diagnostic test */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- !(phba->hba_flag & HBA_FCOE_MODE) &&
+ !test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
(phba->link_flag & LS_LOOPBACK_MODE))
return;
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
- vport->fc_flag & FC_PUBLIC_LOOP &&
- !(vport->fc_flag & FC_LBIT)) {
+ test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) &&
+ !test_bit(FC_LBIT, &vport->fc_flag)) {
/* Need to wait for FAN - use discovery timer
* for timeout. port_state is identically
* LPFC_LOCAL_CFG_LINK while waiting for FAN
@@ -1111,22 +1542,52 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Start discovery by sending a FLOGI. port_state is identically
- * LPFC_FLOGI while waiting for FLOGI cmpl
+ * LPFC_FLOGI while waiting for FLOGI cmpl.
*/
- if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
- lpfc_initial_flogi(vport);
+ if (vport->port_state != LPFC_FLOGI) {
+ /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
+ * bb-credit recovery is in place.
+ */
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ sparam_mb = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!sparam_mb)
+ goto sparam_out;
+
+ rc = lpfc_read_sparam(phba, sparam_mb, 0);
+ if (rc) {
+ mempool_free(sparam_mb, phba->mbox_mem_pool);
+ goto sparam_out;
+ }
+ sparam_mb->vport = vport;
+ sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+ rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
+ MBOX_THD_UNLOCKED);
+ goto sparam_out;
+ }
+
+ set_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
+ } else {
+ lpfc_initial_flogi(vport);
+ }
+ } else {
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
+ lpfc_disc_start(vport);
+ }
return;
out:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0306 CONFIG_LINK mbxStatus error x%x "
- "HBA state x%x\n",
- pmb->u.mb.mbxStatus, vport->port_state);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
+ status, vport->port_state);
+sparam_out:
lpfc_linkdown(phba);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0200 CONFIG_LINK bad hba state x%x\n",
vport->port_state);
@@ -1136,7 +1597,7 @@ out:
/**
* lpfc_sli4_clear_fcf_rr_bmask
- * @phba pointer to the struct lpfc_hba for this port.
+ * @phba: pointer to the struct lpfc_hba for this port.
* This function resets the round robin bit mask and clears the
* fcf priority list. The list deletions are done while holding the
* hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
@@ -1162,10 +1623,10 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "2017 REG_FCFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2017 REG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n", mboxq->u.mb.mbxStatus,
+ vport->port_state);
goto fail_out;
}
@@ -1177,27 +1638,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_unlock_irq(&phba->hbalock);
/* If there is a pending FCoE event, restart FCF table scan. */
- if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
- lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) &&
+ lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
goto fail_out;
/* Mark successful completion of FCF table scan */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
if (vport->port_state != LPFC_FLOGI) {
- phba->hba_flag |= FCF_RR_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ set_bit(FCF_RR_INPROG, &phba->hba_flag);
lpfc_issue_init_vfi(vport);
- goto out;
}
- spin_unlock_irq(&phba->hbalock);
goto out;
fail_out:
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_RR_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
out:
mempool_free(mboxq, phba->mbox_mem_pool);
}
@@ -1235,7 +1692,7 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
/**
* lpfc_sw_name_match - Check if the fcf switch name match.
- * @fab_name: pointer to fabric name.
+ * @sw_name: pointer to switch name.
* @new_fcf_record: pointer to fcf record.
*
* This routine compare the fcf record's switch name with provided
@@ -1298,14 +1755,14 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
* __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: Index for the lpfc_fcf_record.
* @new_fcf_record: pointer to hba fcf record.
*
* This routine updates the driver FCF priority record from the new HBA FCF
- * record. This routine is called with the host lock held.
+ * record. The hbalock is asserted held in the code path calling this
+ * routine.
**/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
@@ -1323,7 +1780,7 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
/**
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
- * @fcf: pointer to driver fcf record.
+ * @fcf_rec: pointer to driver fcf record.
* @new_fcf_record: pointer to fcf record.
*
* This routine copies the FCF information from the FCF
@@ -1381,7 +1838,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
+ * __lpfc_update_fcf_record - Update driver fcf record
* @phba: pointer to lpfc hba data structure.
* @fcf_rec: pointer to driver fcf record.
* @new_fcf_record: pointer to hba fcf record.
@@ -1391,13 +1848,15 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
*
* This routine updates the driver FCF record from the new HBA FCF record
* together with the address mode, vlan_id, and other informations. This
- * routine is called with the host lock held.
+ * routine is called with the hbalock held.
**/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
struct fcf_record *new_fcf_record, uint32_t addr_mode,
uint16_t vlan_id, uint32_t flag)
{
+ lockdep_assert_held(&phba->hbalock);
+
/* Copy the fields from the HBA's FCF record */
lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
/* Update other fields of driver FCF record */
@@ -1425,32 +1884,31 @@ lpfc_register_fcf(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* If the FCF is not available do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
return;
}
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
if (phba->pport->port_state != LPFC_FLOGI &&
- phba->pport->fc_flag & FC_FABRIC) {
- phba->hba_flag |= FCF_RR_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ test_bit(FC_FABRIC, &phba->pport->fc_flag)) {
+ set_bit(FCF_RR_INPROG, &phba->hba_flag);
lpfc_initial_flogi(phba->pport);
return;
}
- spin_unlock_irq(&phba->hbalock);
return;
}
spin_unlock_irq(&phba->hbalock);
fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!fcf_mbxq) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
return;
}
@@ -1459,9 +1917,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
}
@@ -1514,7 +1971,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
return 0;
- if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) {
*boot_flag = 0;
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
new_fcf_record);
@@ -1709,8 +2166,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2833 Stop FCF discovery process due to link "
"state change (x%x)\n", phba->link_state);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
spin_unlock_irq(&phba->hbalock);
}
@@ -1733,8 +2191,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* This function makes a running random selection decision on which FCF record
* to use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manipulation of random numbers with
- * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * size uint32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random number generated.
*
* Returns true when outcome is for the newly read FCF record should be
* chosen; otherwise, return false when outcome is for keeping the previously
@@ -1746,7 +2204,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
uint32_t rand_num;
/* Get 16-bit uniform random number */
- rand_num = 0xFFFF & prandom_u32();
+ rand_num = get_random_u16();
/* Decision with probability 1/fcf_cnt */
if ((fcf_cnt * rand_num) < 0xFFFF)
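
The test above is size-1 reservoir sampling: accepting the k-th eligible record with probability 1/k leaves every record equally likely to be the final choice. A standalone sketch, with rand16() standing in for get_random_u16():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint16_t rand16(void)
{
	return (uint16_t)rand();	/* uniform enough for a sketch */
}

/* True with probability ~1/fcf_cnt, mirroring the driver's test. */
static bool select_new_fcf(uint32_t fcf_cnt)
{
	uint32_t rand_num = rand16();

	return fcf_cnt * rand_num < 0xFFFFu;
}

int main(void)
{
	int hits = 0;

	srand(42);
	for (int i = 0; i < 100000; i++)
		hits += select_new_fcf(4);
	/* expect roughly 25000 hits for fcf_cnt == 4 */
	printf("%d\n", hits);
	return 0;
}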
@@ -1773,10 +2231,9 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
uint16_t *next_fcf_index)
{
void *virt_addr;
- dma_addr_t phys_addr;
struct lpfc_mbx_sge sge;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
- uint32_t shdr_status, shdr_add_status;
+ uint32_t shdr_status, shdr_add_status, if_type;
union lpfc_sli4_cfg_shdr *shdr;
struct fcf_record *new_fcf_record;
@@ -1784,9 +2241,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
if (unlikely(!mboxq->sge_array)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2524 Failed to get the non-embedded SGE "
"virtual address\n");
return NULL;
@@ -1797,14 +2253,17 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
lpfc_sli_pcimem_bcopy(shdr, shdr,
sizeof(union lpfc_sli4_cfg_shdr));
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status) {
- if (shdr_status == STATUS_FCF_TABLE_EMPTY)
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
+ if_type == LPFC_SLI_INTF_IF_TYPE_2)
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2726 READ_FCF_RECORD Indicates empty "
"FCF table.\n");
else
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2521 READ_FCF_RECORD mailbox failed "
"with status x%x add_status x%x, "
"mbx\n", shdr_status, shdr_add_status);
@@ -1888,7 +2347,7 @@ lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
}
/**
- lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
+ * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
* @phba: pointer to lpfc hba data structure.
* @fcf_rec: pointer to an existing FCF record.
* @new_fcf_record: pointer to a new FCF record.
@@ -1937,9 +2396,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
int rc;
if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_DEVLOSS_TMO) {
- spin_unlock_irq(&phba->hbalock);
+ if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2872 Devloss tmo with no eligible "
"FCF, unregister in-use FCF (x%x) "
@@ -1949,8 +2406,9 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
goto stop_flogi_current_fcf;
}
/* Mark the end to FLOGI roundrobin failover */
- phba->hba_flag &= ~FCF_RR_INPROG;
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
/* Allow action to new fcf asynchronous event */
+ spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1958,6 +2416,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
"failover and change port state:x%x/x%x\n",
phba->pport->port_state, LPFC_VPORT_UNKNOWN);
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+ if (!phba->fcf.fcf_redisc_attempted) {
+ lpfc_unregister_fcf(phba);
+
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3195 Rediscover FCF table\n");
+ phba->fcf.fcf_redisc_attempted = 1;
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3196 Rediscover FCF table "
+ "failed. Status:x%x\n", rc);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3197 Already rediscover FCF table "
+ "attempted. No more retry\n");
+ }
goto stop_flogi_current_fcf;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
@@ -1982,7 +2460,7 @@ stop_flogi_current_fcf:
/**
* lpfc_sli4_fcf_pri_list_del
* @phba: pointer to lpfc hba data structure.
- * @fcf_index the index of the fcf record to delete
+ * @fcf_index: the index of the fcf record to delete
* This routine checks the on list flag of the fcf_index to be deleted.
* If it is on the list then it is removed from the list, and the flag
* is cleared. This routine grabs the hbalock before removing the fcf
@@ -2012,9 +2490,9 @@ static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
/**
* lpfc_sli4_set_fcf_flogi_fail
* @phba: pointer to lpfc hba data structure.
- * @fcf_index the index of the fcf record to update
+ * @fcf_index: the index of the fcf record to update
* This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
- * flag so the the round robin slection for the particular priority level
+ * flag so the round robin selection for the particular priority level
* will try a different fcf record that does not have this bit set.
* If the fcf record is re-read for any reason this flag is cleared before
* adding it to the priority list.
@@ -2032,7 +2510,8 @@ lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
/**
* lpfc_sli4_fcf_pri_list_add
* @phba: pointer to lpfc hba data structure.
- * @fcf_index the index of the fcf record to add
+ * @fcf_index: the index of the fcf record to add
+ * @new_fcf_record: pointer to a new FCF record.
* This routine checks the priority of the fcf_index to be added.
* If it is a lower priority than the current head of the fcf_pri list
* then it is added to the list in the right order.
@@ -2044,7 +2523,8 @@ lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
* returns:
* 0=success 1=failure
**/
-int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
+ uint16_t fcf_index,
struct fcf_record *new_fcf_record)
{
uint16_t current_fcf_pri;
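Per its kernel-doc, the routine performs an ordered insert keyed on priority. Reduced to the list_head idiom it builds on, the shape looks roughly like this, a sketch assuming lower numeric priority sorts first (names illustrative):

    #include <linux/list.h>

    struct fcf_pri {
            struct list_head list;
            unsigned int priority;     /* lower value = preferred */
    };

    /* Insert 'ent' keeping the list sorted by ascending priority;
     * the caller is assumed to hold the lock protecting 'head'.
     */
    static void fcf_pri_insert(struct list_head *head, struct fcf_pri *ent)
    {
            struct fcf_pri *cur;

            list_for_each_entry(cur, head, list) {
                    if (ent->priority < cur->priority) {
                            /* adds 'ent' immediately before 'cur' */
                            list_add_tail(&ent->list, &cur->list);
                            return;
                    }
            }
            list_add_tail(&ent->list, head);   /* lowest priority: tail */
    }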
@@ -2147,8 +2627,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
- uint16_t vlan_id;
- uint32_t seed;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
bool select_new_fcf;
int rc;
@@ -2162,13 +2641,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
&next_fcf_index);
if (!new_fcf_record) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2765 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
/* Let next new FCF event trigger fast failover */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
}
@@ -2206,7 +2683,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
phba->fcf.current_rec.fcf_indx) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_TRACE_EVENT,
"2862 FCF (x%x) matches property "
"of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
@@ -2270,13 +2748,13 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2836 New FCF matches in-use "
"FCF (x%x), port_state:x%x, "
- "fc_flag:x%x\n",
+ "fc_flag:x%lx\n",
phba->fcf.current_rec.fcf_indx,
phba->pport->port_state,
phba->pport->fc_flag);
goto out;
} else
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2863 New FCF (x%x) matches "
"property of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
@@ -2385,9 +2863,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcf.fcf_flag |= FCF_AVAILABLE;
/* Setup initial running random FCF selection count */
phba->fcf.eligible_fcf_cnt = 1;
- /* Seeding the random number generator for random selection */
- seed = (uint32_t)(0xFFFFFFFF & jiffies);
- prandom_seed(seed);
}
spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
@@ -2411,10 +2886,10 @@ read_next_fcf:
phba->fcoe_eventtag_at_fcf_scan,
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record));
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_DEVLOSS_TMO) {
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ if (test_bit(HBA_DEVLOSS_TMO,
+ &phba->hba_flag)) {
+ clear_bit(FCF_TS_INPROG,
+ &phba->hba_flag);
/* Unregister in-use FCF and rescan */
lpfc_printf_log(phba, KERN_INFO,
LOG_FIP,
@@ -2427,8 +2902,7 @@ read_next_fcf:
/*
* Let next new FCF event trigger fast failover
*/
- phba->hba_flag &= ~FCF_TS_INPROG;
- spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_TS_INPROG, &phba->hba_flag);
return;
}
/*
@@ -2527,15 +3001,15 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, stop the roundrobin failover process */
if (phba->link_state < LPFC_LINK_UP) {
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
- phba->hba_flag &= ~FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
+ clear_bit(FCF_RR_INPROG, &phba->hba_flag);
goto out;
}
@@ -2545,8 +3019,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (!new_fcf_record) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
- "failed to retrieve a FCF record.\n");
- goto error_out;
+ "failed to retrieve a FCF record. "
+ "hba_flg x%lx fcf_flg x%x\n", phba->hba_flag,
+ phba->fcf.fcf_flag);
+ lpfc_unregister_fcf_rescan(phba);
+ goto out;
}
/* Get the needed parameters from FCF record */
@@ -2629,7 +3106,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, no need to proceed */
@@ -2677,7 +3154,7 @@ out:
*
* This function handles completion of init vfi mailbox command.
*/
-void
+static void
lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
@@ -2690,10 +3167,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_0) &&
mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX,
- "2891 Init VFI mailbox failed 0x%x\n",
- mboxq->u.mb.mbxStatus);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2891 Init VFI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@@ -2721,7 +3197,7 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport)
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2892 Failed to allocate "
+ LOG_TRACE_EVENT, "2892 Failed to allocate "
"init_vfi mailbox\n");
return;
}
@@ -2729,8 +3205,8 @@ lpfc_issue_init_vfi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2893 Failed to issue init_vfi mailbox\n");
mempool_free(mboxq, vport->phba->mbox_mem_pool);
}
}
@@ -2747,27 +3223,23 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX,
- "2609 Init VPI mailbox failed 0x%x\n",
- mboxq->u.mb.mbxStatus);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2609 Init VPI mailbox failed 0x%x\n",
+ mboxq->u.mb.mbxStatus);
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
/* If this port is physical port or FDISC is done, do reg_vpi */
if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_DISCOVERY,
+ LOG_TRACE_EVENT,
"2731 Cannot find fabric "
"controller node\n");
else
@@ -2780,7 +3252,7 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_initial_fdisc(vport);
else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2606 No NPIV Fabric support\n");
}
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -2803,8 +3275,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
vpi = lpfc_alloc_vpi(vport->phba);
if (!vpi) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3303 Failed to obtain vport vpi\n");
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
@@ -2815,7 +3286,7 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2607 Failed to allocate "
+ LOG_TRACE_EVENT, "2607 Failed to allocate "
"init_vpi mailbox\n");
return;
}
@@ -2824,8 +3295,8 @@ lpfc_issue_init_vpi(struct lpfc_vport *vport)
mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2608 Failed to issue init_vpi mailbox\n");
mempool_free(mboxq, vport->phba->mbox_mem_pool);
}
}
@@ -2859,7 +3330,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
FC_VPORT_LINKDOWN);
continue;
}
- if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+ if (test_bit(FC_VPORT_NEEDS_INIT_VPI,
+ &vports[i]->fc_flag)) {
lpfc_issue_init_vpi(vports[i]);
continue;
}
@@ -2869,7 +3341,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
lpfc_vport_set_state(vports[i],
FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vports[i], KERN_ERR,
- LOG_ELS,
+ LOG_TRACE_EVENT,
"0259 No NPIV "
"Fabric support\n");
}
@@ -2881,7 +3353,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
- struct lpfc_dmabuf *dmabuf = mboxq->context1;
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -2893,10 +3364,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_0) &&
mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "2018 REG_VFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "2018 REG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2912,17 +3383,17 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* Unless this was a VFI update and we are in PT2PT mode, then
* we should drop through to set the port state to ready.
*/
- if (vport->fc_flag & FC_VFI_REGISTERED)
+ if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag))
if (!(phba->sli_rev == LPFC_SLI_REV4 &&
- vport->fc_flag & FC_PT2PT))
+ test_bit(FC_PT2PT, &vport->fc_flag)))
goto out_free_mem;
/* The VPI is implicitly registered when the VFI is registered */
+ set_bit(FC_VFI_REGISTERED, &vport->fc_flag);
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
+ clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag |= FC_VFI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
- vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
/* In case SLI4 FC loopback test, we are ready */
@@ -2933,8 +3404,8 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
- "alpacnt:%d LinkState:%x topology:%x\n",
+ "3313 cmpl reg vfi port_state:%x fc_flag:%lx "
+ "myDid:%x alpacnt:%d LinkState:%x topology:%x\n",
vport->port_state, vport->fc_flag, vport->fc_myDID,
vport->phba->alpa_map[0],
phba->link_state, phba->fc_topology);
@@ -2944,14 +3415,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* For private loop or for NPort pt2pt,
* just start discovery and we are done.
*/
- if ((vport->fc_flag & FC_PT2PT) ||
- ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+ if (test_bit(FC_PT2PT, &vport->fc_flag) ||
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+ !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))) {
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
- if (vport->fc_flag & FC_PT2PT)
+ if (test_bit(FC_PT2PT, &vport->fc_flag))
vport->port_state = LPFC_VPORT_READY;
else
lpfc_disc_start(vport);
@@ -2962,24 +3433,23 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
out_free_mem:
- mempool_free(mboxq, phba->mbox_mem_pool);
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
struct lpfc_vport *vport = pmb->vport;
-
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct serv_parm *sp = &vport->fc_sparam;
+ uint32_t ed_tov;
/* Check for error */
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
mb->mbxStatus, vport->port_state);
@@ -2989,24 +3459,39 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
+
+ ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+ if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
+ ed_tov = (ed_tov + 999999) / 1000000;
+
+ phba->fc_edtov = ed_tov;
+ phba->fc_ratov = (2 * ed_tov) / 1000;
+ if (phba->fc_ratov < FF_DEF_RATOV) {
+		/* RA_TOV should be at least 10 sec for the initial FLOGI */
+ phba->fc_ratov = FF_DEF_RATOV;
+ }
+
lpfc_update_vport_wwn(vport);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
+
+ /* Check if sending the FLOGI is being deferred to after we get
+ * up to date CSPs from MBX_READ_SPARAM.
+ */
+ if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) {
+ lpfc_initial_flogi(vport);
+ clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
+ }
return;
out:
- pmb->context1 = NULL;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_issue_clear_la(phba, vport);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
}
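The new E_D_TOV handling in lpfc_mbx_cmpl_read_sparam is plain unit arithmetic: nanosecond-resolution tick counts are rounded up to milliseconds, and R_A_TOV is derived as twice E_D_TOV in seconds with a 10-second floor. The same computation as a standalone sketch (sample values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FF_DEF_RATOV 10                    /* seconds */

    int main(void)
    {
            uint32_t ed_tov = 2000;            /* from common service params */
            int edtov_resolution = 0;          /* 1 = ticks in nanoseconds */
            uint32_t ratov;

            if (edtov_resolution)              /* round ns up to ms */
                    ed_tov = (ed_tov + 999999) / 1000000;

            ratov = (2 * ed_tov) / 1000;       /* ms -> s, doubled */
            if (ratov < FF_DEF_RATOV)
                    ratov = FF_DEF_RATOV;      /* floor for first FLOGI */

            printf("E_D_TOV %u ms, R_A_TOV %u s\n", ed_tov, ratov);
            return 0;
    }

With the FC default E_D_TOV of 2000 ms this yields 4 s, so the 10 s floor wins.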
static void
@@ -3014,41 +3499,45 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
- struct Scsi_Host *shost;
int i;
- struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
- uint32_t fc_flags = 0;
+ unsigned long iflags;
- spin_lock_irq(&phba->hbalock);
- switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
- case LPFC_LINK_SPEED_1GHZ:
- case LPFC_LINK_SPEED_2GHZ:
- case LPFC_LINK_SPEED_4GHZ:
- case LPFC_LINK_SPEED_8GHZ:
- case LPFC_LINK_SPEED_10GHZ:
- case LPFC_LINK_SPEED_16GHZ:
- phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
- break;
- default:
- phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
- break;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
+
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
+ switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
+ case LPFC_LINK_SPEED_1GHZ:
+ case LPFC_LINK_SPEED_2GHZ:
+ case LPFC_LINK_SPEED_4GHZ:
+ case LPFC_LINK_SPEED_8GHZ:
+ case LPFC_LINK_SPEED_10GHZ:
+ case LPFC_LINK_SPEED_16GHZ:
+ case LPFC_LINK_SPEED_32GHZ:
+ case LPFC_LINK_SPEED_64GHZ:
+ case LPFC_LINK_SPEED_128GHZ:
+ case LPFC_LINK_SPEED_256GHZ:
+ break;
+ default:
+ phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
+ break;
+ }
}
if (phba->fc_topology &&
phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3314 Toplogy changed was 0x%x is 0x%x\n",
+ "3314 Topology changed was 0x%x is 0x%x\n",
phba->fc_topology,
bf_get(lpfc_mbx_read_top_topology, la));
phba->fc_topology_changed = 1;
}
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
- phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
- shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -3061,7 +3550,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"topology\n");
/* Get Loop Map information */
if (bf_get(lpfc_mbx_read_top_il, la))
- fc_flags |= FC_LBIT;
+ set_bit(FC_LBIT, &vport->fc_flag);
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3110,17 +3599,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
- fc_flags |= FC_LBIT;
- }
- spin_unlock_irq(&phba->hbalock);
-
- if (fc_flags) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= fc_flags;
- spin_unlock_irq(shost->host_lock);
+ set_bit(FC_LBIT, &vport->fc_flag);
}
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_linkup(phba);
+ sparam_mbox = NULL;
+
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
goto out;
@@ -3134,14 +3619,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
goto out;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!cfglink_mbox)
goto out;
@@ -3161,12 +3643,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* is a phase 1 implementation that supports FCF index 0 and driver
* defaults.
*/
- if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) {
fcf_record = kzalloc(sizeof(struct fcf_record),
GFP_KERNEL);
if (unlikely(!fcf_record)) {
lpfc_printf_log(phba, KERN_ERR,
- LOG_MBOX | LOG_SLI,
+ LOG_TRACE_EVENT,
"2554 Could not allocate memory for "
"fcf record\n");
rc = -ENODEV;
@@ -3178,7 +3660,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR,
- LOG_MBOX | LOG_SLI,
+ LOG_TRACE_EVENT,
"2013 Could not manually add FCF "
"record 0, status %d\n", rc);
rc = -ENODEV;
@@ -3191,33 +3673,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
- spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_TS_INPROG) {
- spin_unlock_irq(&phba->hbalock);
+ if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
return;
- }
/* This is the initial FCF discovery scan */
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fcf.fcf_flag |= FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2778 Start FCF table scan at linkup\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
}
+ /* Prepare for LINK up registrations */
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
return;
@@ -3253,19 +3737,23 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
* This routine handles processing a READ_TOPOLOGY mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
* as the completion routine when the command is
- * handed off to the SLI layer.
+ * handed off to the SLI layer. SLI4 only.
*/
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_mbx_read_top *la;
+ struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
+ uint8_t attn_type;
/* Unblock ELS traffic */
- phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ pring = lpfc_phba_elsring(phba);
+ if (pring)
+ pring->flag &= ~LPFC_STOP_IOCB_EVENT;
+
/* Check for error */
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -3277,36 +3765,25 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+ attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irq(shost->host_lock);
if (bf_get(lpfc_mbx_read_top_pb, la))
- vport->fc_flag |= FC_BYPASSED_MODE;
+ set_bit(FC_BYPASSED_MODE, &vport->fc_flag);
else
- vport->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_BYPASSED_MODE, &vport->fc_flag);
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
- if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
+ if (attn_type == LPFC_ATT_LINK_UP)
if (phba->fc_eventTag != 0)
lpfc_linkdown(phba);
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irq(&phba->hbalock);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irq(&phba->hbalock);
- }
-
phba->link_events++;
- if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3320,80 +3797,57 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
- } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
- LPFC_ATT_LINK_DOWN) {
+
+ if (phba->cmf_active_mode != LPFC_CFG_OFF)
+ lpfc_cmf_signal_init(phba);
+
+ if (phba->lmt & LMT_64Gb)
+ lpfc_read_lds_params(phba);
+
+ } else if (attn_type == LPFC_ATT_LINK_DOWN ||
+ attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
if (phba->link_flag & LS_LOOPBACK_MODE)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
- "Data: x%x x%x x%x\n",
+ "Data: x%x x%x x%lx\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
+ else if (attn_type == LPFC_ATT_UNEXP_WWPN)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1313 Link Down Unexpected FA WWPN Event x%x "
+ "received Data: x%x x%x x%lx x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag,
+ bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%lx x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
- ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
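One structural change here: the attention type is latched into attn_type once, and every branch dispatches on that local instead of re-decoding the mailbox bitfield at each test. The latch-then-dispatch shape in miniature (attention codes are illustrative, not the SLI values):

    enum attn { ATT_LINK_UP = 1, ATT_LINK_DOWN = 2, ATT_UNEXP_WWPN = 6 };

    static void handle_read_top(unsigned int raw)
    {
            /* decode the hardware field exactly once */
            enum attn attn_type = (enum attn)(raw & 0x7);

            if (attn_type == ATT_LINK_UP) {
                    /* bring-up path */
            } else if (attn_type == ATT_LINK_DOWN ||
                       attn_type == ATT_UNEXP_WWPN) {
                    /* both tear the link down; the WWPN case
                     * only differs in what gets logged
                     */
            }
    }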
/*
@@ -3406,17 +3860,24 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_dmabuf *mp = pmb->ctx_buf;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ /* The driver calls the state machine with the pmb pointer
+ * but wants to make sure a stale ctx_buf isn't acted on.
+ * The ctx_buf is restored later and cleaned up.
+ */
+ pmb->ctx_buf = NULL;
+ pmb->ctx_ndlp = NULL;
- if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
- ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
+ "0002 rpi:%x DID:%x flg:%lx %d x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref),
+ ndlp);
+ clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
- if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+ if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) ||
ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
/* We rcvd a rscn after issuing this
* mbox reg login, we may have cycled
@@ -3426,17 +3887,22 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* there is another reg login in
* process.
*/
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
- spin_unlock_irq(shost->host_lock);
- } else
- /* Good status, call state machine */
- lpfc_disc_state_machine(vport, ndlp, pmb,
- NLP_EVT_CMPL_REG_LOGIN);
+ clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
+
+ /*
+ * We cannot leave the RPI registered because
+ * if we go thru discovery again for this ndlp
+ * a subsequent REG_RPI will fail.
+ */
+ set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ lpfc_unreg_rpi(vport, ndlp);
+ }
+
+ /* Call state machine */
+ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ pmb->ctx_buf = mp;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
/* decrement the node reference count held for this callback
* function.
*/
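The "decrement the node reference count held for this callback" comments follow standard kref discipline: pin the node when the mailbox is queued, unpin as the last act of the completion. A minimal sketch of that lifetime rule, assuming a simplified node object:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct node {
            struct kref kref;
    };

    static void node_release(struct kref *kref)
    {
            kfree(container_of(kref, struct node, kref));
    }

    /* Issue path: pin the node for the future completion. */
    static struct node *node_pin(struct node *n)
    {
            return kref_get_unless_zero(&n->kref) ? n : NULL;
    }

    /* Completion path: drop the pin only after the last use, so the
     * node cannot be freed while the handler still dereferences it.
     */
    static void node_cmpl(struct node *n)
    {
            /* ... use n ... */
            kref_put(&n->kref, node_release);
    }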
@@ -3461,25 +3927,25 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
break;
/* If VPI is busy, reset the HBA */
case 0x9700:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
vport->vpi, mb->mbxStatus);
- if (!(phba->pport->load_flag & FC_UNLOADING))
+ if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
lpfc_workq_post_event(phba, NULL, NULL,
LPFC_EVT_RESET_HBA);
}
+
+ set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
*/
- if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
+ if (test_bit(FC_UNLOADING, &vport->load_flag) && vport != phba->pport)
scsi_host_put(shost);
}
@@ -3499,10 +3965,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
@@ -3523,26 +3988,31 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"0912 cmpl_reg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_FABRIC, &vport->fc_flag);
+ clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag);
vport->fc_myDID = 0;
+
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (phba->nvmet_support)
+ lpfc_nvmet_update_targetport(phba);
+ else
+ lpfc_nvme_update_localport(vport);
+ }
goto out;
}
+ clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
vport->num_disc_nodes = 0;
/* go thru NPR list and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
vport->port_state = LPFC_VPORT_READY;
@@ -3578,7 +4048,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0542 lpfc_create_static_vport failed to"
" allocate mailbox memory\n");
return;
@@ -3588,7 +4058,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
if (!vport_info) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0543 lpfc_create_static_vport failed to"
" allocate vport_info\n");
mempool_free(pmb, phba->mbox_mem_pool);
@@ -3597,11 +4067,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
- /* free dma buffer from previous round */
- if (pmb->context1) {
- mp = (struct lpfc_dmabuf *)pmb->context1;
+		/* Each while-loop iteration frees the DMA buffer from
+		 * the previous pass because the mbox is reused and
+		 * the dump routine is a single-use construct.
+		 */
+ if (pmb->ctx_buf) {
+ mp = pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
+ pmb->ctx_buf = NULL;
}
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
@@ -3621,7 +4095,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if (phba->sli_rev == LPFC_SLI_REV4) {
byte_count = pmb->u.mqe.un.mb_words[5];
- mp = (struct lpfc_dmabuf *)pmb->context1;
+ mp = pmb->ctx_buf;
if (byte_count > sizeof(struct static_vport_info) -
offset)
byte_count = sizeof(struct static_vport_info)
@@ -3649,11 +4123,12 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
!= VPORT_INFO_REV)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0545 lpfc_create_static_vport bad"
- " information header 0x%x 0x%x\n",
- le32_to_cpu(vport_info->signature),
- le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0545 lpfc_create_static_vport bad"
+ " information header 0x%x 0x%x\n",
+ le32_to_cpu(vport_info->signature),
+ le32_to_cpu(vport_info->rev) &
+ VPORT_INFO_REV_MASK);
goto out;
}
@@ -3685,16 +4160,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
- if (mbx_wait_rc != MBX_TIMEOUT) {
- if (pmb->context1) {
- mp = (struct lpfc_dmabuf *)pmb->context1;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(pmb, phba->mbox_mem_pool);
- }
-
- return;
+ if (mbx_wait_rc != MBX_TIMEOUT)
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -3708,22 +4175,15 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_ndlp = NULL;
if (mb->mbxStatus) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -3747,27 +4207,19 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
- ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* When the physical port receives a LOGO, do not start
* vport discovery */
- if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+ if (!test_and_clear_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag))
lpfc_start_fdiscs(phba);
- else {
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
- spin_unlock_irq(shost->host_lock);
- }
lpfc_do_scr_ns_plogi(phba, vport);
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Drop the reference count from the mbox at the end after
* all the current reference to the ndlp have been done.
@@ -3776,6 +4228,81 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+ /*
+ * This routine will issue a GID_FT for each FC4 Type supported
+ * by the driver. ALL GID_FTs must complete before discovery is started.
+ */
+int
+lpfc_issue_gidft(struct lpfc_vport *vport)
+{
+ /* Good status, issue CT Request to NameServer */
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
+ /* Cannot issue NameServer FCP Query, so finish up
+ * discovery
+ */
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
+ "0604 %s FC TYPE %x %s\n",
+ "Failed to issue GID_FT to ",
+ FC_TYPE_FCP,
+ "Finishing discovery.");
+ return 0;
+ }
+ vport->gidft_inp++;
+ }
+
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
+ /* Cannot issue NameServer NVME Query, so finish up
+ * discovery
+ */
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
+ "0605 %s FC_TYPE %x %s %d\n",
+ "Failed to issue GID_FT to ",
+ FC_TYPE_NVME,
+ "Finishing discovery: gidftinp ",
+ vport->gidft_inp);
+ if (vport->gidft_inp == 0)
+ return 0;
+ } else
+ vport->gidft_inp++;
+ }
+ return vport->gidft_inp;
+}
+
+/**
+ * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This routine will issue a GID_PT to get a list of all N_Ports
+ *
+ * Return value:
+ * 0 - Failure to issue a GID_PT
+ * 1 - GID_PT issued
+ **/
+int
+lpfc_issue_gidpt(struct lpfc_vport *vport)
+{
+ /* Good status, issue CT Request to NameServer */
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
+ /* Cannot issue NameServer FCP Query, so finish up
+ * discovery
+ */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0606 %s Port TYPE %x %s\n",
+ "Failed to issue GID_PT to ",
+ GID_PT_N_PORT,
+ "Finishing discovery.");
+ return 0;
+ }
+ vport->gidft_inp++;
+ return 1;
+}
+
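gidft_inp acts as a fan-out counter: one increment per nameserver query issued, zero meaning discovery may proceed. The completion-side decrement is not shown in this hunk, so the sketch below is an assumption about the pattern rather than code from the patch (names illustrative; 0x08 and 0x28 are the FC-4 type codes for FCP and NVMe):

    struct disc {
            int gidft_inp;                    /* queries in flight */
    };

    static int issue_ct_query(struct disc *d, int fc4_type)
    {
            return 0;                         /* 0 = issued, sketch only */
    }

    /* Fan out one GID_FT per enabled FC4 type and return the
     * in-flight count so a caller can fail fast on zero.
     */
    static int issue_gidfts(struct disc *d, int fcp, int nvme)
    {
            if (fcp && !issue_ct_query(d, 0x08))
                    d->gidft_inp++;
            if (nvme && !issue_ct_query(d, 0x28))
                    d->gidft_inp++;
            return d->gidft_inp;
    }

    /* Assumed completion side: the last response starts discovery. */
    static void gidft_done(struct disc *d)
    {
            if (d->gidft_inp && --d->gidft_inp == 0)
                    ; /* start_discovery(d); */
    }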
/*
* This routine handles processing a NameServer REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -3786,28 +4313,34 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
+ int rc;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_ndlp = NULL;
+ vport->gidft_inp = 0;
if (mb->mbxStatus) {
-out:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0260 Register NameServer error: 0x%x\n",
mb->mbxStatus);
+
+out:
/* decrement the node reference count held for this
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
- /* If no other thread is using the ndlp, free it */
- lpfc_nlp_not_used(ndlp);
+ /* If the node is not registered with the scsi or nvme
+ * transport, remove the fabric node. The failed reg_login
+ * is terminal and forces the removal of the last node
+ * reference.
+ */
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
+ clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ lpfc_nlp_put(ndlp);
+ }
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/*
@@ -3826,40 +4359,118 @@ out:
if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
- ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0003 rpi:%x DID:%x flg:%lx %d x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref),
+ ndlp);
if (vport->port_state < LPFC_VPORT_READY) {
/* Link up discovery requires Fabric registration. */
- lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ if (phba->pni)
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPNI_PNI, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
+
+ if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
+ FC_TYPE_NVME);
+
/* Issue SCR just before NameServer GID_FT Query */
- lpfc_issue_els_scr(vport, SCR_DID, 0);
+ lpfc_issue_els_scr(vport, 0);
+
+		/* Link was bounced or a Fabric LOGO occurred. Start EDC
+		 * with initial FW values provided the congestion mode is
+		 * not off. Note that signals may or may not be supported
+		 * by the adapter, but FPIN is requested by default when
+		 * one or both signal types are missing.
+		 */
+ if (phba->cmf_active_mode != LPFC_CFG_OFF) {
+ phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
+ phba->cgn_reg_signal = phba->cgn_init_reg_signal;
+ rc = lpfc_issue_els_edc(vport, 0);
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_ELS | LOG_DISCOVERY,
+ "4220 Issue EDC status x%x Data x%x\n",
+ rc, phba->cgn_init_reg_signal);
+ } else if (phba->lmt & LMT_64Gb) {
+ /* may send link fault capability descriptor */
+ lpfc_issue_els_edc(vport, 0);
+ } else {
+ lpfc_issue_els_rdf(vport, 0);
+ }
}
vport->fc_ns_retry = 0;
- /* Good status, issue CT Request to NameServer */
- if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
- /* Cannot issue NameServer Query, so finish up discovery */
+ if (lpfc_issue_gidft(vport) == 0)
goto out;
- }
- /* decrement the node reference count held for this
+ /*
+ * At this point in time we may need to wait for multiple
+	 * At this point we may need to wait for multiple
+	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
+	 *
+	 * Decrement the node reference count held for this
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
+/*
+ * This routine handles processing a Fabric Controller REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+
+ pmb->ctx_ndlp = NULL;
+ if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0933 %s: Register FC login error: 0x%x\n",
+ __func__, mb->mbxStatus);
+ goto out;
+ }
+
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->nlp_state);
+
+ set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag);
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+ out:
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
+
+ /* Drop the reference count from the mbox at the end after
+ * all the current reference to the ndlp have been done.
+ */
+ lpfc_nlp_put(ndlp);
+}
+
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -3868,6 +4479,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_rport_data *rdata;
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+
+ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ return;
/* Remote port has reappeared. Re-register w/ FC transport */
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
@@ -3875,54 +4490,63 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rport_ids.port_id = ndlp->nlp_DID;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- /*
- * We leave our node pointer in rport->dd_data when we unregister a
- * FCP target port. But fc_remote_port_add zeros the space to which
- * rport->dd_data points. So, if we're reusing a previously
- * registered port, drop the reference that we took the last time we
- * registered the port.
- */
- if (ndlp->rport && ndlp->rport->dd_data &&
- ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
- lpfc_nlp_put(ndlp);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
- "rport add: did:x%x flg:x%x type x%x",
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+ "rport add: did:x%x flg:x%lx type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
/* Don't add the remote port if unloading. */
- if (vport->load_flag & FC_UNLOADING)
+ if (test_bit(FC_UNLOADING, &vport->load_flag))
return;
ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
- if (!rport || !get_device(&rport->dev)) {
+ if (!rport) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
return;
}
- /* initialize static port data */
+ /* Successful port add. Complete initializing node data */
rport->maxframe_size = ndlp->nlp_maxframe;
rport->supported_classes = ndlp->nlp_class_sup;
rdata = rport->dd_data;
rdata->pnode = lpfc_nlp_get(ndlp);
+ if (!rdata->pnode) {
+ dev_warn(&phba->pcidev->dev,
+ "Warning - node ref failed. Unreg rport\n");
+ fc_remote_port_delete(rport);
+ ndlp->rport = NULL;
+ return;
+ }
+
+ spin_lock_irqsave(&ndlp->lock, flags);
+ ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, flags);
if (ndlp->nlp_type & NLP_FCP_TARGET)
- rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
- rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
+ if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+ rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport %p role x%x\n",
- ndlp->nlp_DID, rport, rport_ids.roles);
+ "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
+ __func__, rport, rport->port_id, rport->roles,
+ kref_read(&ndlp->kref));
if ((rport->scsi_target_id != -1) &&
(rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
+
return;
}
@@ -3930,116 +4554,254 @@ static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
+ struct lpfc_vport *vport = ndlp->vport;
+
+ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ return;
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
- "rport delete: did:x%x flg:x%x type x%x",
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport delete: did:x%x flg:x%lx type x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport %p\n",
- ndlp->nlp_DID, rport);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "3184 rport unregister x%06x, rport x%px "
+ "xptflg x%x refcnt %d\n",
+ ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
fc_remote_port_delete(rport);
-
- return;
+ lpfc_nlp_put(ndlp);
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
switch (state) {
case NLP_STE_UNUSED_NODE:
- vport->fc_unused_cnt += count;
+ atomic_add(count, &vport->fc_unused_cnt);
break;
case NLP_STE_PLOGI_ISSUE:
- vport->fc_plogi_cnt += count;
+ atomic_add(count, &vport->fc_plogi_cnt);
break;
case NLP_STE_ADISC_ISSUE:
- vport->fc_adisc_cnt += count;
+ atomic_add(count, &vport->fc_adisc_cnt);
break;
case NLP_STE_REG_LOGIN_ISSUE:
- vport->fc_reglogin_cnt += count;
+ atomic_add(count, &vport->fc_reglogin_cnt);
break;
case NLP_STE_PRLI_ISSUE:
- vport->fc_prli_cnt += count;
+ atomic_add(count, &vport->fc_prli_cnt);
break;
case NLP_STE_UNMAPPED_NODE:
- vport->fc_unmap_cnt += count;
+ atomic_add(count, &vport->fc_unmap_cnt);
break;
case NLP_STE_MAPPED_NODE:
- vport->fc_map_cnt += count;
+ atomic_add(count, &vport->fc_map_cnt);
break;
case NLP_STE_NPR_NODE:
- vport->fc_npr_cnt += count;
+ if (!atomic_read(&vport->fc_npr_cnt) && count == -1)
+ atomic_set(&vport->fc_npr_cnt, 0);
+ else
+ atomic_add(count, &vport->fc_npr_cnt);
break;
}
- spin_unlock_irq(shost->host_lock);
+}
+
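The counter rework swaps the host-lock critical section for per-state atomics; only the NPR counter gets extra care, refusing to decrement below zero. That clamp in isolation (illustrative name, with the same benign read/modify window as the original):

    #include <linux/atomic.h>

    static atomic_t npr_cnt = ATOMIC_INIT(0);

    /* Apply a +/-1 delta but never let the count go negative. */
    static void npr_adjust(int delta)
    {
            if (!atomic_read(&npr_cnt) && delta == -1)
                    atomic_set(&npr_cnt, 0);      /* already empty */
            else
                    atomic_add(delta, &npr_cnt);
    }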
+/* Register a node with backend if not already done */
+void
+lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ lpfc_check_nlp_post_devloss(vport, ndlp);
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ /* Already registered with backend, trigger rescan */
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
+ lpfc_nvme_rescan_port(vport, ndlp);
+ }
+ return;
+ }
+
+ ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (lpfc_valid_xpt_node(ndlp)) {
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+		 * already. If we have, and it's a scsi entity, be
+		 * sure to unblock any attached scsi devices
+		 */
+ lpfc_register_remote_port(vport, ndlp);
+ }
+
+ /* We are done if we do not have any NVME remote node */
+ if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
+ return;
+
+ /* Notify the NVME transport of this new rport. */
+ if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+ ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->nvmet_support == 0) {
+ /* Register this rport with the transport.
+ * Only NVME Target Rports are registered with
+ * the transport.
+ */
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
+ } else {
+ /* Just take an NDLP ref count since the
+ * target does not register rports.
+ */
+ lpfc_nlp_get(ndlp);
+ }
+ }
+}
+
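lpfc_nlp_reg_node is made idempotent by a test-and-set on NLP_XPT_REGD under the node lock before any transport upcall; a second caller finds the flag already set and at most triggers a rescan. The guard reduced to essentials (names illustrative):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define XPT_REGD 0x1

    struct node {
            spinlock_t lock;
            unsigned int xpt_flags;
    };

    /* True if this caller won the right to register; false means
     * someone already did and only a rescan may be warranted.
     */
    static bool claim_registration(struct node *n)
    {
            unsigned long iflags;

            spin_lock_irqsave(&n->lock, iflags);
            if (n->xpt_flags & XPT_REGD) {
                    spin_unlock_irqrestore(&n->lock, iflags);
                    return false;
            }
            n->xpt_flags |= XPT_REGD;
            spin_unlock_irqrestore(&n->lock, iflags);
            return true;
    }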
+/* Unregister a node with backend if not already done */
+void
+lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%lx XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
+ return;
+ }
+
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+
+ if (ndlp->rport &&
+ ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%lx"
+ " XPT x%x refcnt %u\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
+ }
+
+ if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
+ vport->phba->nport_event_cnt++;
+ if (vport->phba->nvmet_support == 0) {
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
+ /* NVMET has no upcall. */
+ lpfc_nlp_put(ndlp);
+ }
+ }
+
+}
+
+/*
+ * ADISC state change handling
+ */
+static void
+lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int new_state)
+{
+ switch (new_state) {
+ /*
+ * Any state to ADISC_ISSUE
+ * Do nothing, adisc cmpl handling will trigger state changes
+ */
+ case NLP_STE_ADISC_ISSUE:
+ break;
+
+ /*
+ * ADISC_ISSUE to mapped states
+ * Trigger a registration with backend, it will be nop if
+ * already registered
+ */
+ case NLP_STE_UNMAPPED_NODE:
+ ndlp->nlp_type |= NLP_FC_NODE;
+ fallthrough;
+ case NLP_STE_MAPPED_NODE:
+ clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
+ lpfc_nlp_reg_node(vport, ndlp);
+ break;
+
+ /*
+ * ADISC_ISSUE to non-mapped states
+	 * We are moving from ADISC_ISSUE to a non-mapped state because
+	 * ADISC failed; we would have skipped unregistering with the
+	 * backend, so attempt it now
+ */
+ case NLP_STE_NPR_NODE:
+ clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
+ fallthrough;
+ default:
+ lpfc_nlp_unreg_node(vport, ndlp);
+ break;
+ }
+
}
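The new ADISC handler groups its states with the kernel's fallthrough annotation: UNMAPPED does extra setup and then shares MAPPED's registration path, and NPR clears a flag and then shares the default unregister path. The grouping in miniature (state numbers illustrative):

    /* 'fallthrough' comes from <linux/compiler_attributes.h> and marks
     * the missing break as deliberate for the compiler.
     */
    static void on_new_state(int st)
    {
            switch (st) {
            case 1:                    /* UNMAPPED: mark FC node... */
                    fallthrough;
            case 2:                    /* ...then shared register path */
                    /* register with backend */
                    break;
            case 3:                    /* NPR: clear RCV_PLOGI... */
                    fallthrough;
            default:                   /* ...then shared unregister */
                    /* unregister from backend */
                    break;
            }
    }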
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Trap ADISC changes here */
+ if (new_state == NLP_STE_ADISC_ISSUE ||
+ old_state == NLP_STE_ADISC_ISSUE) {
+ lpfc_handle_adisc_state(vport, ndlp, new_state);
+ return;
+ }
if (new_state == NLP_STE_UNMAPPED_NODE) {
- ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
ndlp->nlp_type |= NLP_FC_NODE;
}
if (new_state == NLP_STE_MAPPED_NODE)
- ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+ clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag);
if (new_state == NLP_STE_NPR_NODE)
- ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+ clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag);
- /* Transport interface */
- if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
- old_state == NLP_STE_UNMAPPED_NODE)) {
- vport->phba->nport_event_cnt++;
- lpfc_unregister_remote_port(ndlp);
+ /* Reg/Unreg for FCP and NVME Transport interface */
+ if ((old_state == NLP_STE_MAPPED_NODE ||
+ old_state == NLP_STE_UNMAPPED_NODE)) {
+		/* For nodes marked for ADISC, handle the unreg in the ADISC
+		 * completion if the link is up. On linkdown, do unreg_node.
+		 */
+ if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) ||
+ !lpfc_is_link_up(vport->phba))
+ lpfc_nlp_unreg_node(vport, ndlp);
}
if (new_state == NLP_STE_MAPPED_NODE ||
- new_state == NLP_STE_UNMAPPED_NODE) {
- vport->phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- * sure to unblock any attached scsi devices
- */
- lpfc_register_remote_port(vport, ndlp);
- }
- if ((new_state == NLP_STE_MAPPED_NODE) &&
- (vport->stat_data_enabled)) {
- /*
- * A new target is discovered, if there is no buffer for
- * statistical data collection allocate buffer.
- */
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_KERNEL);
+ new_state == NLP_STE_UNMAPPED_NODE)
+ lpfc_nlp_reg_node(vport, ndlp);
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
- "0286 lpfc_nlp_state_cleanup failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
/*
- * if we added to Mapped list, but the remote port
- * registration failed or assigned a target id outside
- * our presentable range - move the node to the
- * Unmapped List
+ * If the node just added to Mapped list was an FCP target,
+ * but the remote port registration failed or assigned a target
+ * id outside the presentable range - move the node to the
+ * Unmapped List.
*/
- if (new_state == NLP_STE_MAPPED_NODE &&
+ if ((new_state == NLP_STE_MAPPED_NODE) &&
+ (ndlp->nlp_type & NLP_FCP_TARGET) &&
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
- spin_unlock_irq(shost->host_lock);
+ set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
@@ -4060,7 +4822,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
};
if (state < NLP_STE_MAX_STATE && states[state])
- strlcpy(buffer, states[state], size);
+ strscpy(buffer, states[state], size);
else
snprintf(buffer, size, "unknown (%d)", state);
return buffer;
@@ -4070,9 +4832,10 @@ void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int state)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
+ bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag);
char name1[16], name2[16];
+ unsigned long iflags;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0904 NPort state transition x%06x, %s -> %s\n",
@@ -4084,18 +4847,24 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"node statechg did:x%x old:%d ste:%d",
ndlp->nlp_DID, old_state, state);
+ if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
+ state != NLP_STE_UNUSED_NODE) {
+ clear_bit(NLP_DROPPED, &ndlp->nlp_flag);
+ lpfc_nlp_get(ndlp);
+ }
+
if (old_state == NLP_STE_NPR_NODE &&
state != NLP_STE_NPR_NODE)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (old_state == NLP_STE_UNMAPPED_NODE) {
- ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+ clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag);
ndlp->nlp_type &= ~NLP_FC_NODE;
}
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
} else if (old_state)
lpfc_nlp_counters(vport, old_state, -1);
@@ -4107,39 +4876,30 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
NLP_STE_UNUSED_NODE);
}
-static void
-lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
- lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
- NLP_STE_UNUSED_NODE);
-}
/**
* lpfc_initialize_node - Initialize all fields of node object
* @vport: Pointer to Virtual Port object.
@@ -4160,71 +4920,18 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- init_timer(&ndlp->nlp_delayfunc);
- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
+ INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
+
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
+ ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
- NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
-}
-
-struct lpfc_nodelist *
-lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- int state)
-{
- struct lpfc_hba *phba = vport->phba;
- uint32_t did;
- unsigned long flags;
-
- if (!ndlp)
- return NULL;
-
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* The ndlp should not be in memory free mode */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 lpfc_enable_node: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
- return NULL;
- }
- /* The ndlp should not already be in active mode */
- if (NLP_CHK_NODE_ACT(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 lpfc_enable_node: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
- return NULL;
- }
-
- /* Keep the original DID */
- did = ndlp->nlp_DID;
-
- /* re-initialize ndlp except of ndlp linked list pointer */
- memset((((char *)ndlp) + sizeof (struct list_head)), 0,
- sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
- lpfc_initialize_node(vport, ndlp, did);
-
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
-
- if (state != NLP_STE_UNUSED_NODE)
- lpfc_nlp_set_state(vport, ndlp, state);
-
- lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
- "node enable: did:x%x",
- ndlp->nlp_DID, 0, 0);
- return ndlp;
+ ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
}
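
The init_timer() to timer_setup() conversion above also changes the callback signature: the handler now receives the timer pointer and recovers its enclosing object with a container_of()-style helper (compare timer_container_of() in lpfc_disc_timeout later in this diff) instead of a casted unsigned long. A small userspace sketch of that recovery idiom, with illustrative struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sketch_timer {
	void (*function)(struct sketch_timer *t);
};

struct sketch_node {
	int did;
	struct sketch_timer delayfunc;	/* embedded, like nlp_delayfunc */
};

/* The callback receives the embedded timer and recovers the node,
 * mirroring what timer_container_of(vport, t, fc_disctmo) does.
 */
static void retry_delay(struct sketch_timer *t)
{
	struct sketch_node *node = container_of(t, struct sketch_node, delayfunc);

	printf("retry delay fired for DID x%x\n", node->did);
}

int main(void)
{
	struct sketch_node n = { .did = 0xef, .delayfunc = { retry_delay } };

	n.delayfunc.function(&n.delayfunc);	/* simulate timer expiry */
	return 0;
}
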
void
@@ -4232,18 +4939,23 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
/*
* Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
- * be used if we wish to issue the "last" lpfc_nlp_put() to remove
- * the ndlp from the vport. The ndlp marked as UNUSED on the list
- * until ALL other outstanding threads have completed. We check
- * that the ndlp not already in the UNUSED state before we proceed.
+	 * be used when lpfc wants to issue the "last" lpfc_nlp_put() to
+ * release the ndlp from the vport when conditions are correct.
*/
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
- lpfc_nlp_put(ndlp);
- return;
+ lpfc_unreg_rpi(vport, ndlp);
+ }
+
+ /* NLP_DROPPED means another thread already removed the initial
+ * reference from lpfc_nlp_init. If set, don't drop it again and
+ * introduce an imbalance.
+ */
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
}
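
The test_and_set_bit(NLP_DROPPED, ...) guard above makes dropping the initial node reference idempotent: only the first thread to set the bit performs the put, so a racing second caller cannot unbalance the refcount. A userspace sketch of the same idiom with C11 atomics (all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NODE_DROPPED	0UL		/* bit number, like NLP_DROPPED */

static atomic_ulong node_flags;
static atomic_int node_ref = 1;		/* the "initial" reference */

/* Returns the previous bit value, like test_and_set_bit(). */
static bool sketch_test_and_set_bit(unsigned long nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	return atomic_fetch_or(addr, mask) & mask;
}

static void drop_node(void)
{
	/* Only the first dropper releases the initial reference;
	 * a racing duplicate call must not unbalance the count.
	 */
	if (!sketch_test_and_set_bit(NODE_DROPPED, &node_flags))
		atomic_fetch_sub(&node_ref, 1);
}

int main(void)
{
	drop_node();
	drop_node();			/* harmless duplicate */
	printf("refcnt=%d\n", atomic_load(&node_ref));	/* 0, not -1 */
	return 0;
}
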
/*
@@ -4252,7 +4964,6 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo;
@@ -4273,18 +4984,17 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
tmo, vport->port_state, vport->fc_flag);
}
- mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_DISC_TMO;
- spin_unlock_irq(shost->host_lock);
+ mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo));
+ set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
vport->port_state, tmo,
- (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ (unsigned long)&vport->fc_disctmo,
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt));
return;
}
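
The atomic_read() calls above reflect the conversion of the per-vport discovery counters (fc_plogi_cnt, fc_adisc_cnt, fc_npr_cnt) from lock-protected integers to atomics, so log statements can read them without taking the host lock. A trivial C11 sketch of the counter idiom (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Discovery counters as atomics: increments/decrements on state
 * transitions, lock-free reads in log statements.
 */
static atomic_int fc_plogi_cnt;
static atomic_int fc_adisc_cnt;

static void enter_plogi_issue(void) { atomic_fetch_add(&fc_plogi_cnt, 1); }
static void leave_plogi_issue(void) { atomic_fetch_sub(&fc_plogi_cnt, 1); }

int main(void)
{
	enter_plogi_issue();
	enter_plogi_issue();
	leave_plogi_issue();
	printf("plogi=%d adisc=%d\n",
	       atomic_load(&fc_plogi_cnt), atomic_load(&fc_adisc_cnt));
	return 0;
}
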
@@ -4295,7 +5005,6 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long iflags;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -4303,11 +5012,10 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO) {
- spin_lock_irqsave(shost->host_lock, iflags);
- vport->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irqrestore(shost->host_lock, iflags);
- del_timer_sync(&vport->fc_disctmo);
+ if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
+ timer_pending(&vport->fc_disctmo)) {
+ clear_bit(FC_DISC_TMO, &vport->fc_flag);
+ timer_delete_sync(&vport->fc_disctmo);
spin_lock_irqsave(&vport->work_port_lock, iflags);
vport->work_port_events &= ~WORKER_DISC_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflags);
@@ -4316,9 +5024,10 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0248 Cancel Discovery Timer state x%x "
- "Data: x%x x%x x%x\n",
+ "Data: x%lx x%x x%x\n",
vport->port_state, vport->fc_flag,
- vport->fc_plogi_cnt, vport->fc_adisc_cnt);
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt));
return 0;
}
@@ -4332,42 +5041,92 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
- struct lpfc_sli *psli = &phba->sli;
- IOCB_t *icmd = &iocb->iocb;
- struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_vport *vport = ndlp->vport;
+ u8 ulp_command;
+ u16 ulp_context;
+ u32 remote_id;
if (iocb->vport != vport)
return 0;
+ ulp_command = get_job_cmnd(phba, iocb);
+ ulp_context = get_job_ulpcontext(phba, iocb);
+ remote_id = get_job_els_rsp64_did(phba, iocb);
+
if (pring->ringno == LPFC_ELS_RING) {
- switch (icmd->ulpCommand) {
+ switch (ulp_command) {
case CMD_GEN_REQUEST64_CR:
- if (iocb->context_un.ndlp == ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
+ break;
case CMD_ELS_REQUEST64_CR:
- if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ if (remote_id == ndlp->nlp_DID)
return 1;
+ fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
- if (iocb->context1 == (uint8_t *) ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
}
- } else if (pring->ringno == psli->extra_ring) {
-
- } else if (pring->ringno == psli->fcp_ring) {
+ } else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
- (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))
return 0;
- }
- if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
- return 1;
- }
- } else if (pring->ringno == psli->next_ring) {
+ if (ulp_context == ndlp->nlp_rpi)
+ return 1;
}
return 0;
}
+static void
+__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
+ struct list_head *dequeue_list)
+{
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+ /* match, dequeue */
+ list_move_tail(&iocb->list, dequeue_list);
+ }
+}
+
+static void
+lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t i;
+
+ spin_lock_irq(&phba->hbalock);
+ for (i = 0; i < psli->num_rings; i++)
+ __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
+ dequeue_list);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_queue *qp = NULL;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
+ spin_lock(&pring->ring_lock);
+ __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
+ spin_unlock(&pring->ring_lock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
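
The helpers above factor the old open-coded ring walk into one routine: iterate the txq safely and move matching iocbs onto a caller-owned dequeue_list while the appropriate lock is held, so completion happens after the lock drops. A self-contained userspace sketch of the safe-iterate-and-move idiom (the minimal list implementation stands in for the kernel's list.h):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	e->prev->next = e->next;	/* unlink from the current list */
	e->next->prev = e->prev;
	list_add_tail(e, h);		/* relink at the tail of h */
}

struct iocb {
	struct list_head list;		/* first member, so the cast below is valid */
	int owner;			/* stands in for the nport match */
};

int main(void)
{
	struct list_head txq, dequeue_list, *pos, *next;
	struct iocb io[3] = { { .owner = 1 }, { .owner = 2 }, { .owner = 1 } };
	int i;

	INIT_LIST_HEAD(&txq);
	INIT_LIST_HEAD(&dequeue_list);
	for (i = 0; i < 3; i++)
		list_add_tail(&io[i].list, &txq);

	/* Remember 'next' before moving the entry: the reason the
	 * driver uses list_for_each_entry_safe() here.
	 */
	for (pos = txq.next; pos != &txq; pos = next) {
		next = pos->next;
		if (((struct iocb *)pos)->owner == 1)
			list_move_tail(pos, &dequeue_list);
	}

	for (pos = dequeue_list.next; pos != &dequeue_list; pos = pos->next)
		printf("dequeued iocb owned by %d\n", ((struct iocb *)pos)->owner);
	return 0;
}
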
/*
* Free resources / clean up outstanding I/Os
* associated with nlp_rpi in the LPFC_NODELIST entry.
@@ -4376,10 +5135,6 @@ static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
- struct lpfc_sli_ring *pring;
- struct lpfc_iocbq *iocb, *next_iocb;
- uint32_t i;
lpfc_fabric_abort_nport(ndlp);
@@ -4387,29 +5142,11 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* Everything that matches on txcmplq will be returned
* by firmware with a no rpi error.
*/
- psli = &phba->sli;
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
- /* Now process each ring */
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
-
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
- list) {
- /*
- * Check to see if iocb matches the nport we are
- * looking for
- */
- if ((lpfc_check_sli_ndlp(phba, pring, iocb,
- ndlp))) {
- /* It matches, so deque and call compl
- with an error */
- list_move_tail(&iocb->list,
- &completions);
- }
- }
- spin_unlock_irq(&phba->hbalock);
- }
+ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
+ else
+ lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
}
/* Cancel all the IOCBs from the completions list */
@@ -4427,16 +5164,68 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* This function will issue an ELS LOGO command after completing
* the UNREG_RPI.
**/
-void
+static void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;
- ndlp = (struct lpfc_nodelist *)(pmb->context1);
+ ndlp = pmb->ctx_ndlp;
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
+
+ /* Check to see if there are any deferred events to process */
+ if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) &&
+ ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "1434 UNREG cmpl deferred logo x%x "
+ "on NPort x%x Data: x%x x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_defer_did, ndlp);
+
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ }
+
+ /* The node has an outstanding reference for the unreg. Now
+ * that the LOGO action and cleanup are finished, release
+ * resources.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+	/* The driver always takes a node reference for the mailbox
+	 * job in support of async completion.
+	 */
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!mbox->ctx_ndlp)
+ return;
+
+ if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) {
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !test_bit(FC_UNLOADING, &vport->load_flag) &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (kref_read(&ndlp->kref) > 0)) {
+ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else {
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
}
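
lpfc_set_unreg_login_mbx_cmpl() above centralizes the choice of mailbox completion handler rather than open-coding it at each unreg_rpi call site. A tiny sketch of the state-driven callback-selection shape (states and handler names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct mbox {
	void (*mbox_cmpl)(struct mbox *m);
};

static void cmpl_logo(struct mbox *m)    { (void)m; printf("LOGO completion path\n"); }
static void cmpl_rpi_clr(struct mbox *m) { (void)m; printf("rpi-clear completion path\n"); }
static void cmpl_default(struct mbox *m) { (void)m; printf("default completion path\n"); }

/* Pick the completion handler once, from the current state. */
static void set_unreg_cmpl(struct mbox *m, bool issue_logo, bool sli4_active)
{
	if (issue_logo)
		m->mbox_cmpl = cmpl_logo;
	else if (sli4_active)
		m->mbox_cmpl = cmpl_rpi_clr;
	else
		m->mbox_cmpl = cmpl_default;
}

int main(void)
{
	struct mbox m;

	set_unreg_cmpl(&m, false, true);
	m.mbox_cmpl(&m);		/* fires the rpi-clear path */
	return 0;
}
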
/*
@@ -4453,10 +5242,35 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
- int rc;
+ int rc, acc_plogi = 1;
uint16_t rpi;
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) ||
+ test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) {
+ if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag))
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "3366 RPI x%x needs to be "
+ "unregistered nlp_flag x%lx "
+ "did x%x\n",
+ ndlp->nlp_rpi, ndlp->nlp_flag,
+ ndlp->nlp_DID);
+
+ /* If there is already an UNREG in progress for this ndlp,
+ * no need to queue up another one.
+ */
+ if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "1436 unreg_rpi SKIP UNREG x%x on "
+ "NPort x%x deferred x%x flg x%lx "
+ "Data: x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_defer_did,
+ ndlp->nlp_flag, ndlp);
+ goto out;
+ }
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
/* SLI4 ports require the physical rpi value. */
@@ -4466,25 +5280,69 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->context1 = ndlp;
- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
- } else {
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (!mbox->ctx_ndlp) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 1;
}
+			/* Accept PLOGIs only after the unreg_rpi completes. */
+ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+ acc_plogi = 0;
+
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))
+ set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "1433 unreg_rpi UNREG x%x on "
+ "NPort x%x deferred flg x%lx "
+ "Data:x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
mempool_free(mbox, phba->mbox_mem_pool);
+ acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
+ }
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "1444 Failed to allocate mempool "
+ "unreg_rpi UNREG x%x, "
+ "DID x%x, flag x%lx, "
+ "ndlp x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp);
+
+ /* Because mempool_alloc failed, we
+ * will issue a LOGO here and keep the rpi alive if
+ * not unloading.
+ */
+ if (!test_bit(FC_UNLOADING, &vport->load_flag)) {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
+
+ return 1;
}
lpfc_no_rpi(phba, ndlp);
-
+out:
if (phba->sli_rev != LPFC_SLI_REV4)
ndlp->nlp_rpi = 0;
- ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
+ clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
+ if (acc_plogi)
+ clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
return 1;
}
+ clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
return 0;
}
@@ -4500,27 +5358,28 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
int i;
+ unsigned long iflags;
vports = lpfc_create_vport_work_array(phba);
if (!vports) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "2884 Vport array allocation failed \n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2884 Vport array allocation failed \n");
return;
}
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
/* The mempool_alloc might sleep */
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ iflags);
lpfc_unreg_rpi(vports[i], ndlp);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock,
+ iflags);
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
}
lpfc_destroy_vport_work_array(phba, vports);
}
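
Note the pattern in lpfc_unreg_hba_rpis() above: the node-list lock is dropped around lpfc_unreg_rpi() because that path may sleep in mempool_alloc(), then retaken before the walk continues. A userspace pthreads sketch of the unlock-call-relock shape (list-stability concerns across the unlock are out of scope for this sketch):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t nodes_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for lpfc_unreg_rpi(): may block, so it must never be
 * entered with nodes_lock held.
 */
static void unreg_node(int id)
{
	usleep(1000);			/* pretend to sleep in an allocator */
	printf("unregistered node %d\n", id);
}

int main(void)
{
	int id;

	pthread_mutex_lock(&nodes_lock);
	for (id = 0; id < 3; id++) {
		/* Drop the lock across the sleeping call... */
		pthread_mutex_unlock(&nodes_lock);
		unreg_node(id);
		/* ...and retake it before touching the list again. */
		pthread_mutex_lock(&nodes_lock);
	}
	pthread_mutex_unlock(&nodes_lock);
	return 0;
}
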
@@ -4543,15 +5402,16 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
- "1836 Could not issue "
- "unreg_login(all_rpis) status %d\n", rc);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "1836 Could not issue "
+ "unreg_login(all_rpis) status %d\n",
+ rc);
}
}
@@ -4562,19 +5422,23 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
LPFC_MBOXQ_t *mbox;
int rc;
+ /* Unreg DID is an SLI3 operation. */
+ if (phba->sli_rev > LPFC_SLI_REV3)
+ return;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
mbox);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->context1 = NULL;
+ mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
- lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1815 Could not issue "
"unreg_did (default rpis) status %d\n",
rc);
@@ -4588,33 +5452,16 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0900 Cleanup node for NPort x%x "
- "Data: x%x x%x x%x\n",
+ "Data: x%lx x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- if (NLP_CHK_FREE_REQ(ndlp)) {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 lpfc_cleanup_node: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
- lpfc_dequeue_node(vport, ndlp);
- } else {
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 lpfc_cleanup_node: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
- lpfc_disable_node(vport, ndlp);
- }
-
+ lpfc_dequeue_node(vport, ndlp);
/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
@@ -4622,8 +5469,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
- mb->context2 = NULL;
+ (ndlp == mb->ctx_ndlp)) {
+ mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
@@ -4633,27 +5480,22 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
- (ndlp != (struct lpfc_nodelist *) mb->context2))
+ (ndlp != mb->ctx_ndlp))
continue;
- mb->context2 = NULL;
+ mb->ctx_ndlp = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
- mp = (struct lpfc_dmabuf *) (mb->context1);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
+ (ndlp == mb->ctx_ndlp)) {
list_del(&mb->list);
- mempool_free(mb, phba->mbox_mem_pool);
- /* We shall not invoke the lpfc_nlp_put to decrement
- * the ndlp reference count as we are in the process
- * of lpfc_nlp_release.
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
+
+ /* Don't invoke lpfc_nlp_put. The driver is in
+ * lpfc_nlp_release context.
*/
}
}
@@ -4661,74 +5503,18 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_els_abort(phba, ndlp);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = 0;
- del_timer_sync(&ndlp->nlp_delayfunc);
+ timer_delete_sync(&ndlp->nlp_delayfunc);
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ list_del_init(&ndlp->recovery_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
- lpfc_unreg_rpi(vport, ndlp);
-
return 0;
}
-/*
- * Check to see if we can free the nlp back to the freelist.
- * If we are in the middle of using the nlp in the discovery state
- * machine, defer the free till we reach the end of the state machine.
- */
-static void
-lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- LPFC_MBOXQ_t *mbox;
- int rc;
-
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
- !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
- /* For this case we need to cleanup the default rpi
- * allocated by the firmware.
- */
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
- != NULL) {
- rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
- (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- else {
- mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
- mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
- mbox->vport = vport;
- mbox->context2 = ndlp;
- rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- }
- }
- }
- }
- lpfc_cleanup_node(vport, ndlp);
-
- /*
- * We can get here with a non-NULL ndlp->rport because when we
- * unregister a rport we don't break the rport/node linkage. So if we
- * do, make sure we don't leaving any dangling pointers behind.
- */
- if (ndlp->rport) {
- rdata = ndlp->rport->dd_data;
- rdata->pnode = NULL;
- ndlp->rport = NULL;
- }
-}
-
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@@ -4753,9 +5539,19 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (matchdid.un.b.id == ndlpdid.un.b.id) {
if ((mydid.un.b.domain == matchdid.un.b.domain) &&
(mydid.un.b.area == matchdid.un.b.area)) {
+ /* This code is supposed to match the ID
+ * for a private loop device that is
+			 * connected to an fl_port. But we need to
+ * check that the port did not just go
+ * from pt2pt to fabric or we could end
+ * up matching ndlp->nlp_DID 000001 to
+ * fabric DID 0x20101
+ */
if ((ndlpdid.un.b.domain == 0) &&
(ndlpdid.un.b.area == 0)) {
- if (ndlpdid.un.b.id)
+ if (ndlpdid.un.b.id &&
+ vport->phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP)
return 1;
}
return 0;
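
The private-loop check above depends on a Fibre Channel address being a 24-bit value split into domain, area, and AL_PA id bytes; a private-loop ndlp carries domain == 0 and area == 0, which is why a bare id must not be matched once the port has moved to a fabric DID such as 0x20101. A small sketch of that decomposition:

#include <stdint.h>
#include <stdio.h>

struct fc_did {
	uint8_t domain;			/* bits 23..16 */
	uint8_t area;			/* bits 15..8  */
	uint8_t id;			/* bits 7..0, the loop AL_PA */
};

static struct fc_did split_did(uint32_t did)
{
	struct fc_did d = {
		.domain = (did >> 16) & 0xff,
		.area   = (did >> 8) & 0xff,
		.id     = did & 0xff,
	};
	return d;
}

int main(void)
{
	struct fc_did fab  = split_did(0x020101);	/* fabric-assigned DID */
	struct fc_did loop = split_did(0x000001);	/* private-loop DID */

	printf("fabric: dom=%x area=%x id=%x\n", fab.domain, fab.area, fab.id);
	printf("loop:   dom=%x area=%x id=%x\n", loop.domain, loop.area, loop.id);
	return 0;
}
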
@@ -4779,27 +5575,35 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (lpfc_matchdid(vport, ndlp, did)) {
- data1 = (((uint32_t) ndlp->nlp_state << 24) |
- ((uint32_t) ndlp->nlp_xri << 16) |
- ((uint32_t) ndlp->nlp_type << 8) |
- ((uint32_t) ndlp->nlp_rpi & 0xff));
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ data1 = (((uint32_t)ndlp->nlp_state << 24) |
+ ((uint32_t)ndlp->nlp_xri << 16) |
+ ((uint32_t)ndlp->nlp_type << 8)
+ );
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x\n",
+ "Data: x%px x%x x%lx x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1);
- return ndlp;
+ ndlp->nlp_flag, data1, ndlp->nlp_rpi,
+ ndlp->active_rrqs_xri_bitmap);
+
+ /* Check for new or potentially stale node */
+ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+ return ndlp;
+ np = ndlp;
}
}
- /* FIND node did <did> NOT FOUND */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0932 FIND node did x%x NOT FOUND.\n", did);
- return NULL;
+ if (!np)
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+
+ return np;
}
struct lpfc_nodelist *
@@ -4816,67 +5620,134 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
}
struct lpfc_nodelist *
+lpfc_findnode_mapped(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp;
+ uint32_t data1;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
+
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ data1 = (((uint32_t)ndlp->nlp_state << 24) |
+ ((uint32_t)ndlp->nlp_xri << 16) |
+ ((uint32_t)ndlp->nlp_type << 8) |
+ ((uint32_t)ndlp->nlp_rpi & 0xff));
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+ iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
+ "2025 FIND node DID MAPPED "
+ "Data: x%px x%x x%lx x%x x%px\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, data1,
+ ndlp->active_rrqs_xri_bitmap);
+ return ndlp;
+ }
+ }
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
+
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "2026 FIND mapped did NOT FOUND.\n");
+ return NULL;
+}
+
+struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
- if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+ if (vport->phba->nvmet_support)
+ return NULL;
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
lpfc_rscn_payload_check(vport, did) == 0)
return NULL;
- ndlp = (struct lpfc_nodelist *)
- mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, did);
if (!ndlp)
return NULL;
- lpfc_nlp_init(vport, ndlp, did);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
- return ndlp;
- } else if (!NLP_CHK_NODE_ACT(ndlp)) {
- ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp)
- return NULL;
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6453 Setup New Node 2B_DISC x%x "
+ "Data:x%lx x%x x%lx\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
+ set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
return ndlp;
}
- if ((vport->fc_flag & FC_RSCN_MODE) &&
- !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+ /* The NVME Target does not want to actively manage an rport.
+ * The goal is to allow the target to reset its state and clear
+ * pending IO in preparation for the initiator to recover.
+ */
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
+ !test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) {
if (lpfc_rscn_payload_check(vport, did)) {
- /* If we've already received a PLOGI from this NPort
- * we don't need to try to discover it again.
- */
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
- return NULL;
/* Since this node is marked for discovery,
* delay timeout is not needed.
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
- } else
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6455 Setup RSCN Node 2B_DISC x%x "
+ "Data:x%lx x%x x%lx\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
+ /* NVME Target mode waits until rport is known to be
+ * impacted by the RSCN before it transitions. No
+ * active management - just go to NPR provided the
+ * node had a valid login.
+ */
+ if (vport->phba->nvmet_support)
+ return ndlp;
+
+ if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
+
+ set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6456 Skip Setup RSCN Node x%x "
+ "Data:x%lx x%x x%lx\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
+ }
} else {
- /* If we've already received a PLOGI from this NPort,
- * or we are already in the process of discovery on it,
- * we don't need to try to discover it again.
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6457 Setup Active Node 2B_DISC x%x "
+ "Data:x%lx x%x x%lx\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
+ /* If the initiator received a PLOGI from this NPort or if the
+ * initiator is already in the process of discovery on it,
+ * there's no need to try to discover it again.
*/
if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
- ndlp->nlp_flag & NLP_RCV_PLOGI)
+ (!vport->phba->nvmet_support &&
+ test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)))
return NULL;
+
+ if (vport->phba->nvmet_support)
+ return ndlp;
+
+ /* Moving to NPR state clears unsolicited flags and
+ * allows for rediscovery
+ */
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
+ set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
}
return ndlp;
}
@@ -4922,14 +5793,14 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
return;
}
+/* SLI3 only */
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *mbox;
struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
- struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
- struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
+ struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
+ struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
int rc;
/*
@@ -4953,7 +5824,6 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_disc_flush_list(vport);
extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
- next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
phba->link_state = LPFC_HBA_ERROR;
}
}
@@ -4981,11 +5851,9 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
void
lpfc_disc_start(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
uint32_t num_sent;
uint32_t clear_la_pending;
- int did_changed;
if (!lpfc_is_link_up(phba)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
@@ -5004,20 +5872,17 @@ lpfc_disc_start(struct lpfc_vport *vport)
lpfc_set_disctmo(vport);
- if (vport->fc_prevDID == vport->fc_myDID)
- did_changed = 0;
- else
- did_changed = 1;
-
vport->fc_prevDID = vport->fc_myDID;
vport->num_disc_nodes = 0;
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0202 Start Discovery hba state x%x "
- "Data: x%x x%x x%x\n",
- vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ "0202 Start Discovery port state x%x "
+ "flg x%lx Data: x%x x%x x%x\n",
+ vport->port_state, vport->fc_flag,
+ atomic_read(&vport->fc_plogi_cnt),
+ atomic_read(&vport->fc_adisc_cnt),
+ atomic_read(&vport->fc_npr_cnt));
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5027,11 +5892,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Register the VPI for SLI3, NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_RSCN_MODE) &&
+ !test_bit(FC_PT2PT, &vport->fc_flag) &&
+ !test_bit(FC_RSCN_MODE, &vport->fc_flag) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
return;
}
@@ -5042,19 +5906,16 @@ lpfc_disc_start(struct lpfc_vport *vport)
*/
if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
/* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
+ lpfc_issue_clear_la(phba, vport);
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) {
vport->num_disc_nodes = 0;
/* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
+ if (atomic_read(&vport->fc_npr_cnt))
lpfc_els_disc_plogi(vport);
if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag);
lpfc_can_disctmo(vport);
}
}
@@ -5066,18 +5927,17 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
- if (vport->fc_flag & FC_RSCN_MODE) {
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
/* Check to see if more RSCNs came in while we
* were processing this one.
*/
- if ((vport->fc_rscn_id_cnt == 0) &&
- (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(shost->host_lock);
+ if (vport->fc_rscn_id_cnt == 0 &&
+ !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
+ clear_bit(FC_RSCN_MODE, &vport->fc_flag);
lpfc_can_disctmo(vport);
- } else
+ } else {
lpfc_els_handle_rscn(vport);
+ }
}
}
return;
@@ -5091,25 +5951,26 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
- IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
+ u32 ulp_command;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
+ pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
/* Error matching iocb on txq or txcmplq
* First check the txq.
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
list_move_tail(&iocb->list, &completions);
}
@@ -5117,17 +5978,21 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
- icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
spin_unlock_irq(&phba->hbalock);
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
@@ -5139,11 +6004,10 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_hba *phba = vport->phba;
- if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+ if (atomic_read(&vport->fc_plogi_cnt) ||
+ atomic_read(&vport->fc_adisc_cnt)) {
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
lpfc_free_tx(phba, ndlp);
@@ -5152,12 +6016,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and the transport is notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
@@ -5176,9 +6062,9 @@ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
*/
/*****************************************************************************/
void
-lpfc_disc_timeout(unsigned long ptr)
+lpfc_disc_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_vport *vport = timer_container_of(vport, t, fc_disctmo);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long flags = 0;
@@ -5200,20 +6086,15 @@ lpfc_disc_timeout(unsigned long ptr)
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_nodelist *ndlp, *next_ndlp;
LPFC_MBOXQ_t *initlinkmbox;
int rc, clrlaerr = 0;
- if (!(vport->fc_flag & FC_DISC_TMO))
+ if (!test_and_clear_bit(FC_DISC_TMO, &vport->fc_flag))
return;
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(shost->host_lock);
-
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"disc timeout: state:x%x rtry:x%x flg:x%x",
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
@@ -5221,24 +6102,23 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
- /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
- * FAN
- */
- /* FAN timeout */
+ /*
+ * port_state is identically LPFC_LOCAL_CFG_LINK while
+ * waiting for FAN timeout
+ */
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0221 FAN timeout\n");
+
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
/* Clean up the ndlp on Fabric connections */
lpfc_drop_node(vport, ndlp);
- } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) {
/* Fail outstanding IO now since device
* is marked for PLOGI.
*/
@@ -5258,7 +6138,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FLOGI:
/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0222 Initial %s timeout\n",
vport->vpi ? "FDISC" : "FLOGI");
@@ -5276,12 +6157,13 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FABRIC_CFG_LINK:
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0223 Timeout while waiting for "
"NameServer login\n");
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ if (ndlp)
lpfc_els_abort(phba, ndlp);
/* ReStart discovery */
@@ -5289,7 +6171,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0224 NameServer Query timeout "
"Data: x%x x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
@@ -5297,8 +6180,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
/* Try it one more time */
vport->fc_ns_retry++;
- rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
- vport->fc_ns_retry, 0);
+ vport->gidft_inp = 0;
+ rc = lpfc_issue_gidft(vport);
if (rc == 0)
break;
}
@@ -5322,7 +6205,8 @@ restart_disc:
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0206 Device Discovery "
"completion error\n");
phba->link_state = LPFC_HBA_ERROR;
@@ -5344,7 +6228,8 @@ restart_disc:
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0227 Node Authentication timeout\n");
lpfc_disc_flush_list(vport);
@@ -5363,11 +6248,13 @@ restart_disc:
break;
case LPFC_VPORT_READY:
- if (vport->fc_flag & FC_RSCN_MODE) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
- "x%x\n",
- vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+ "x%x x%x x%x\n",
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
+ vport->port_state, vport->gidft_inp);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
@@ -5378,7 +6265,8 @@ restart_disc:
break;
default:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
@@ -5387,21 +6275,23 @@ restart_disc:
switch (phba->link_state) {
case LPFC_CLEAR_LA:
/* CLEAR LA timeout */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0228 CLEAR LA timeout\n");
clrlaerr = 1;
break;
case LPFC_LINK_UP:
lpfc_issue_clear_la(phba, vport);
- /* Drop thru */
+ fallthrough;
case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
case LPFC_HBA_ERROR:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_TRACE_EVENT,
"0230 Unexpected timeout, hba link "
"state x%x\n", phba->link_state);
clrlaerr = 1;
@@ -5413,12 +6303,14 @@ restart_disc:
if (clrlaerr) {
lpfc_disc_flush_list(vport);
- psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->sli3_ring[LPFC_FCP_RING].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ }
vport->port_state = LPFC_VPORT_READY;
}
-
return;
}
@@ -5432,39 +6324,40 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
- pmb->context1 = NULL;
- pmb->context2 = NULL;
+ pmb->ctx_ndlp = NULL;
if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
- ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag);
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
-
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0004 rpi:%x DID:%x flg:%lx %d x%px\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref),
+ ndlp);
/*
* Start issuing Fabric-Device Management Interface (FDMI) command to
- * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
- * fdmi-on=2 (supporting RPA/hostnmae)
+ * 0xfffffa (FDMI well known port).
+ * DHBA -> DPRT -> RHBA -> RPA (physical port)
+ * DPRT -> RPRT (vports)
*/
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ } else {
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+ }
- if (vport->cfg_fdmi_on == 1)
- lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
- else
- mod_timer(&vport->fc_fdmitmo,
- jiffies + msecs_to_jiffies(1000 * 60));
/* decrement the node reference count held for this callback
* function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
@@ -5473,10 +6366,6 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
uint16_t *rpi = param;
- /* check for active node */
- if (!NLP_CHK_NODE_ACT(ndlp))
- return 0;
-
return ndlp->nlp_rpi == *rpi;
}
@@ -5494,16 +6383,19 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3185 FIND node filter %p DID "
- "Data: x%p x%x x%x\n",
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
+ "3185 FIND node filter %ps DID "
+ "ndlp x%px did x%x flg x%lx st x%x "
+ "xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_xri, ndlp->nlp_type,
+ ndlp->nlp_rpi);
return ndlp;
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3186 FIND node filter %p NOT FOUND.\n", filter);
+ "3186 FIND node filter %ps NOT FOUND.\n", filter);
return NULL;
}
@@ -5543,10 +6435,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long flags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, flags);
ndlp = __lpfc_findnode_rpi(vport, rpi);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, flags);
return ndlp;
}
@@ -5576,44 +6469,79 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
* Translate the physical vpi to the logical vpi. The
* vport stores the logical vpi.
*/
- for (i = 0; i < phba->max_vpi; i++) {
+ for (i = 0; i <= phba->max_vpi; i++) {
if (vpi == phba->vpi_ids[i])
break;
}
- if (i >= phba->max_vpi) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2936 Could not find Vport mapped "
- "to vpi %d\n", vpi);
+ if (i > phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
return NULL;
}
}
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
-void
-lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- uint32_t did)
+struct lpfc_nodelist *
+lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
+ struct lpfc_nodelist *ndlp;
+ int rpi = LPFC_RPI_ALLOC_ERROR;
+
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return NULL;
+ }
+
+ ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(vport->phba, rpi);
+ return NULL;
+ }
+
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+ spin_lock_init(&ndlp->lock);
+
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ ndlp->nlp_rpi = rpi;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "0007 Init New ndlp x%px, rpi:x%x DID:x%x "
+ "flg:x%lx refcnt:%d\n",
+ ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, kref_read(&ndlp->kref));
+
+ ndlp->active_rrqs_xri_bitmap =
+ mempool_alloc(vport->phba->active_rrq_pool,
+ GFP_KERNEL);
+ if (ndlp->active_rrqs_xri_bitmap)
+ memset(ndlp->active_rrqs_xri_bitmap, 0,
+ ndlp->phba->cfg_rrq_xri_bitmap_sz);
+ }
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
ndlp->nlp_DID, 0, 0);
- return;
+ return ndlp;
}
/* This routine releases all resources associated with a specifc NPort's ndlp
@@ -5622,37 +6550,44 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
static void
lpfc_nlp_release(struct kref *kref)
{
- struct lpfc_hba *phba;
- unsigned long flags;
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
+ struct lpfc_vport *vport = ndlp->vport;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node release: did:x%x flg:x%x type:x%x",
+ "node release: did:x%x flg:x%lx type:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 lpfc_nlp_release: ndlp:x%p did %x "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
+ __func__, ndlp, ndlp->nlp_DID,
+ kref_read(&ndlp->kref), ndlp->nlp_rpi);
/* remove ndlp from action. */
- lpfc_nlp_remove(ndlp->vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cleanup_node(vport, ndlp);
- /* clear the ndlp active flag for all release cases */
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- NLP_CLR_NODE_ACT(ndlp);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+	/* On SLI4, every node is initialized with an RPI that must be
+	 * released now. All references are gone and the node has been
+	 * dequeued.
+	 */
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
+
+ /* The node is not freed back to memory, it is released to a pool so
+ * the node fields need to be cleaned up.
+ */
+ ndlp->vport = NULL;
+ ndlp->nlp_state = NLP_STE_FREED_NODE;
+ ndlp->nlp_flag = 0;
+ ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- if (NLP_CHK_FREE_REQ(ndlp)) {
- kfree(ndlp->lat_data);
- mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
- }
+ if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
+ mempool_free(ndlp->active_rrqs_xri_bitmap,
+ ndlp->phba->active_rrq_pool);
+ mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
@@ -5662,120 +6597,50 @@ lpfc_nlp_release(struct kref *kref)
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
unsigned long flags;
if (ndlp) {
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node get: did:x%x flg:x%x refcnt:x%x",
+ "node get: did:x%x flg:x%lx refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
+ kref_read(&ndlp->kref));
+
		/* Check ndlp usage to avoid incrementing a reference
		 * count that is in the process of being released.
*/
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ spin_lock_irqsave(&ndlp->lock, flags);
+ if (!kref_get_unless_zero(&ndlp->kref)) {
+ spin_unlock_irqrestore(&ndlp->lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 lpfc_nlp_get: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
+ "0276 %s: ndlp:x%px refcnt:%d\n",
+ __func__, (void *)ndlp, kref_read(&ndlp->kref));
return NULL;
- } else
- kref_get(&ndlp->kref);
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ }
+ spin_unlock_irqrestore(&ndlp->lock, flags);
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
}
+
return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
- * count goes to 0, this indicates the the associated nodelist should be
- * freed. Returning 1 indicates the ndlp resource has been released; on the
- * other hand, returning 0 indicates the ndlp resource has not been released
- * yet.
+ * count goes to 0, this indicates the associated nodelist should be freed.
*/
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
- unsigned long flags;
-
- if (!ndlp)
- return 1;
-
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
- phba = ndlp->phba;
- spin_lock_irqsave(&phba->ndlp_lock, flags);
- /* Check the ndlp memory free acknowledge flag to avoid the
- * possible race condition that kref_put got invoked again
- * after previous one has done ndlp memory free.
- */
- if (NLP_CHK_FREE_ACK(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 lpfc_nlp_put: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
- return 1;
- }
- /* Check the ndlp inactivate log flag to avoid the possible
- * race condition that kref_put got invoked again after ndlp
- * is already in inactivating state.
- */
- if (NLP_CHK_IACT_REQ(ndlp)) {
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 lpfc_nlp_put: ndlp:x%p "
- "usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
- atomic_read(&ndlp->kref.refcount));
- return 1;
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node put: did:x%x flg:x%lx refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
+ } else {
+ WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
}
- /* For last put, mark the ndlp usage flags to make sure no
- * other kref_get and kref_put on the same ndlp shall get
- * in between the process when the final kref_put has been
- * invoked on this ndlp.
- */
- if (atomic_read(&ndlp->kref.refcount) == 1) {
- /* Indicate ndlp is put to inactive state. */
- NLP_SET_IACT_REQ(ndlp);
- /* Acknowledge ndlp memory free has been seen. */
- if (NLP_CHK_FREE_REQ(ndlp))
- NLP_SET_FREE_ACK(ndlp);
- }
- spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- /* Note, the kref_put returns 1 when decrementing a reference
- * count that was 1, it invokes the release callback function,
- * but it still left the reference count as 1 (not actually
- * performs the last decrementation). Otherwise, it actually
- * decrements the reference count and returns 0.
- */
- return kref_put(&ndlp->kref, lpfc_nlp_release);
-}
-/* This routine free's the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the
- * ndlp has been freed. A return value of 0 indicates the ndlp is
- * not yet been released.
- */
-int
-lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
-{
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node not used: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- atomic_read(&ndlp->kref.refcount));
- if (atomic_read(&ndlp->kref.refcount) == 1)
- if (lpfc_nlp_put(ndlp))
- return 1;
- return 0;
+ return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
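The put side pairs with this: kref_put() invokes the release callback exactly once, on the transition to zero, which is what lets the patch delete the old IACT/FREE_ACK flag machinery. Continuing the hypothetical sketch:

#include <linux/slab.h>

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, kref);

	kfree(obj);	/* runs exactly once, on the final put */
}

/* Returns 1 if this call dropped the last reference and freed the
 * object, 0 otherwise; the same contract lpfc_nlp_put() now has.
 */
static int my_obj_put(struct my_obj *obj)
{
	return obj ? kref_put(&obj->kref, my_obj_release) : 0;
}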
/**
@@ -5794,7 +6659,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
struct lpfc_vport **vports;
int i, ret = 0;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
+ unsigned long iflags;
vports = lpfc_create_vport_work_array(phba);
@@ -5803,35 +6668,36 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
return 1;
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
/*
* If the CVL_RCVD bit is not set then we have sent the
* FLOGI.
* If dev_loss fires while we are waiting, we do not want
* to unreg the FCF.
*/
- if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
- spin_unlock_irq(shost->host_lock);
+ if (!test_bit(FC_VPORT_CVL_RCVD, &vports[i]->fc_flag)) {
ret = 1;
goto out;
}
+ spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+ if (ndlp->rport &&
(ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
ret = 1;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
+ iflags);
goto out;
- } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ } else if (test_bit(NLP_RPI_REGISTERED,
+ &ndlp->nlp_flag)) {
ret = 1;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2624 RPI %x DID %x flag %x "
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "2624 RPI %x DID %x flag %lx "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag);
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
}
out:
lpfc_destroy_vport_work_array(phba, vports);
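This hunk illustrates the conversion pattern used throughout the patch: fc_flag and nlp_flag become unsigned long bitmaps driven by the atomic set_bit()/clear_bit()/test_bit() helpers, so readers like the loop above no longer take shost->host_lock just to inspect a flag, and the node list gets its own fc_nodes_list_lock. A minimal sketch of the idiom with hypothetical flag names:

#include <linux/bitops.h>

/* Flag values are bit numbers, not masks. */
enum { MY_FLAG_CVL_RCVD, MY_FLAG_NEEDS_INIT_VPI };

struct my_port {
	unsigned long flags;
};

static int my_port_check(struct my_port *port)
{
	set_bit(MY_FLAG_CVL_RCVD, &port->flags);	/* atomic RMW, no lock */

	if (test_bit(MY_FLAG_CVL_RCVD, &port->flags)) {
		clear_bit(MY_FLAG_CVL_RCVD, &port->flags);
		return 1;
	}
	return 0;
}

Note how lpfc_unregister_fcf_prep() below keeps the plain read-modify-write of vpi_state under host_lock but moves the set_bit() outside it; atomic bitops need no external serialization.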
@@ -5849,17 +6715,14 @@ void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2555 UNREG_VFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2555 UNREG_VFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
}
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
+ clear_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag);
mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
@@ -5877,10 +6740,10 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct lpfc_vport *vport = mboxq->vport;
if (mboxq->u.mb.mbxStatus) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2550 UNREG_FCFI mbxStatus error x%x "
- "HBA state x%x\n",
- mboxq->u.mb.mbxStatus, vport->port_state);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2550 UNREG_FCFI mbxStatus error x%x "
+ "HBA state x%x\n",
+ mboxq->u.mb.mbxStatus, vport->port_state);
}
mempool_free(mboxq, phba->mbox_mem_pool);
return;
@@ -5923,9 +6786,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
- vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &vports[i]->fc_flag);
}
lpfc_destroy_vport_work_array(phba, vports);
if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
@@ -5938,9 +6801,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
lpfc_mbx_unreg_vpi(phba->pport);
shost = lpfc_shost_from_vport(phba->pport);
spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
+ set_bit(FC_VPORT_NEEDS_INIT_VPI, &phba->pport->fc_flag);
}
/* Cleanup any outstanding ELS commands */
@@ -5969,7 +6832,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2551 UNREG_FCFI mbox allocation failed"
"HBA state x%x\n", phba->pport->port_state);
return -ENOMEM;
@@ -5980,7 +6843,7 @@ lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2552 Unregister FCFI command failed rc x%x "
"HBA state x%x\n",
rc, phba->pport->port_state);
@@ -6004,7 +6867,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
/* Preparation for unregistering fcf */
rc = lpfc_unregister_fcf_prep(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2748 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
return;
@@ -6015,15 +6878,17 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
if (rc)
return;
/* Reset HBA FCF states after successful unregister FCF */
+ spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag = 0;
+ spin_unlock_irq(&phba->hbalock);
phba->fcf.current_rec.flag = 0;
/*
* If driver is not unloading, check if there is any other
* FCF record that can be used for discovery.
*/
- if ((phba->pport->load_flag & FC_UNLOADING) ||
- (phba->link_state < LPFC_LINK_UP))
+ if (test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
+ phba->link_state < LPFC_LINK_UP)
return;
/* This is considered as the initial FCF discovery scan */
@@ -6040,7 +6905,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2553 lpfc_unregister_unused_fcf failed "
"to read FCF record HBA state x%x\n",
phba->pport->port_state);
@@ -6062,7 +6927,7 @@ lpfc_unregister_fcf(struct lpfc_hba *phba)
/* Preparation for unregistering fcf */
rc = lpfc_unregister_fcf_prep(phba);
if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2749 Failed to prepare for unregistering "
"HBA's FCF record: rc=%d\n", rc);
return;
@@ -6095,11 +6960,11 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
* registered, do nothing.
*/
spin_lock_irq(&phba->hbalock);
- if (!(phba->hba_flag & HBA_FCOE_MODE) ||
+ if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) ||
!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
- !(phba->hba_flag & HBA_FIP_SUPPORT) ||
+ !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) ||
(phba->fcf.fcf_flag & FCF_DISCOVERY) ||
- (phba->pport->port_state == LPFC_FLOGI)) {
+ phba->pport->port_state == LPFC_FLOGI) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -6149,18 +7014,14 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
GFP_KERNEL);
if (!conn_entry) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2566 Failed to allocate connection"
- " table entry\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2566 Failed to allocate connection"
+ " table entry\n");
return;
}
memcpy(&conn_entry->conn_rec, &conn_rec[i],
sizeof(struct lpfc_fcf_conn_rec));
- conn_entry->conn_rec.vlan_tag =
- conn_entry->conn_rec.vlan_tag;
- conn_entry->conn_rec.flags =
- conn_entry->conn_rec.flags;
list_add_tail(&conn_entry->list,
&phba->fcf_conn_rec_list);
}
@@ -6287,7 +7148,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
uint8_t *buff,
uint32_t size)
{
- uint32_t offset = 0, rec_length;
+ uint32_t offset = 0;
uint8_t *rec_ptr;
/*
@@ -6299,7 +7160,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
/* Check the region signature first */
if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2567 Config region 23 has bad signature\n");
return;
}
@@ -6308,14 +7169,12 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
/* Check the data structure version */
if (buff[offset] != LPFC_REGION23_VERSION) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2568 Config region 23 has bad version\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2568 Config region 23 has bad version\n");
return;
}
offset += 4;
- rec_length = buff[offset + 1];
-
/* Read FCoE param record */
rec_ptr = lpfc_get_rec_conf23(&buff[offset],
size - offset, FCOE_PARAM_TYPE);
@@ -6329,3 +7188,38 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}
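The parser above walks a flat flash region: a 4-byte signature, a version byte, then type-tagged records located by lpfc_get_rec_conf23(). The exact record layout is internal to the driver, but a sketch of the general walk, under an assumed type/length record format, looks like this:

#include <linux/string.h>
#include <linux/types.h>

#define MY_SIGNATURE	"RG23"	/* assumed 4-byte region signature */
#define MY_VERSION	1

/* Hypothetical record walk: each record starts with a type byte and a
 * length byte covering the payload that follows.
 */
static const u8 *my_find_rec(const u8 *buf, u32 size, u8 want_type)
{
	u32 off = 8;	/* skip signature word + version word */

	if (size < off || memcmp(buf, MY_SIGNATURE, 4) != 0)
		return NULL;
	if (buf[4] != MY_VERSION)
		return NULL;

	while (off + 2 <= size) {
		u8 type = buf[off];
		u8 len  = buf[off + 1];

		if (type == want_type)
			return &buf[off];
		off += 2 + len;
	}
	return NULL;
}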
+
+/**
+ * lpfc_error_lost_link - Check if an IO failed due to a link event or FW reset.
+ *
+ * @vport: Pointer to lpfc_vport data structure.
+ * @ulp_status: IO completion status.
+ * @ulp_word4: Reason code for the ulp_status.
+ *
+ * This function checks the ulp_status and ulp_word4 values
+ * for specific error codes that indicate an internal link fault
+ * or a FW reset event on the completing IO. Callers use this
+ * common check to decide the next steps for the IO.
+ *
+ * Return:
+ * false - No link or reset error occurred.
+ * true - A link or reset error occurred.
+ */
+bool
+lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4)
+{
+ /* Mask off the extra port data to get just the reason code. */
+ u32 rsn_code = IOERR_PARAM_MASK & ulp_word4;
+
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (rsn_code == IOERR_SLI_ABORTED ||
+ rsn_code == IOERR_LINK_DOWN ||
+ rsn_code == IOERR_SLI_DOWN)) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS,
+ "0408 Report link error true: <x%x:x%x>\n",
+ ulp_status, ulp_word4);
+ return true;
+ }
+
+ return false;
+}
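A completion handler would typically consult this helper before scheduling a retry; a hypothetical caller (not part of this patch) might look like:

/* Hypothetical completion fragment: when the failure came from a link
 * event or FW reset, retrying against the same target is pointless.
 */
static void my_els_cmpl(struct lpfc_vport *vport, u32 ulp_status,
			u32 ulp_word4)
{
	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4))
		return;

	/* ... normal completion and retry handling ... */
}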