path: root/drivers/scsi/qedf/qedf_main.c
Diffstat (limited to 'drivers/scsi/qedf/qedf_main.c')
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 1018
1 file changed, 780 insertions(+), 238 deletions(-)
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 9bbc19fc190b..7792e00800ae 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic FCoE Offload Driver
* Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -16,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kthread.h>
+#include <linux/phylink.h>
#include <scsi/libfc.h>
#include <scsi/scsi_host.h>
#include <scsi/fc_frame.h>
@@ -30,6 +28,10 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
+static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
/*
* Driver module parameters.
@@ -40,7 +42,7 @@ MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
"remote ports (default 60)");
uint qedf_debug = QEDF_LOG_INFO;
-module_param_named(debug, qedf_debug, uint, S_IRUGO);
+module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
" mask");
@@ -104,6 +106,12 @@ module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
"during probe (0-3: 0 more verbose).");
+static bool qedf_enable_recovery = true;
+module_param_named(enable_recovery, qedf_enable_recovery,
+ bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
+ "interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
+
struct workqueue_struct *qedf_io_wq;
static struct fcoe_percpu_s qedf_global;
@@ -113,35 +121,45 @@ static struct kmem_cache *qedf_io_work_cache;
void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
- qedf->vlan_id = vlan_id;
- qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
- "prio=%d.\n", vlan_id, qedf->prio);
+ int vlan_id_tmp = 0;
+
+ vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
+ qedf->vlan_id = vlan_id_tmp;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Setting vlan_id=0x%04x prio=%d.\n",
+ vlan_id_tmp, qedf->prio);
}
/* Returns true if we have a valid vlan, false otherwise */
static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
- int rc;
-
- if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
- QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
- return false;
- }
while (qedf->fipvlan_retries--) {
- if (qedf->vlan_id > 0)
+ /* Catch the case where the link goes down during fipvlan retries */
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+ QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
+ return false;
+ }
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
+ return false;
+ }
+
+ if (qedf->vlan_id > 0) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "vlan = 0x%x already set, calling ctlr_link_up.\n",
+ qedf->vlan_id);
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ fcoe_ctlr_link_up(&qedf->ctlr);
return true;
+ }
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Retry %d.\n", qedf->fipvlan_retries);
init_completion(&qedf->fipvlan_compl);
qedf_fcoe_send_vlan_req(qedf);
- rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
- 1 * HZ);
- if (rc > 0) {
- fcoe_ctlr_link_up(&qedf->ctlr);
- return true;
- }
+ wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
}
return false;
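
The reworked loop above moves the link check inside the retry loop, so a mid-retry link drop (or a driver unload) bails out instead of burning the remaining retries. A minimal userspace sketch of that control flow, assuming illustrative names (`acquire_vlan`, `send_vlan_request` are not the driver's API):

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool link_up = true;   /* driver's link state, set elsewhere */
static int vlan_id;           /* stays 0 until a FIP response fills it */

static void send_vlan_request(void)
{
	/* stands in for qedf_fcoe_send_vlan_req() */
}

static bool acquire_vlan(int retries)
{
	while (retries--) {
		if (!link_up)     /* link dropped mid-retry: give up */
			return false;
		if (vlan_id > 0)  /* a response already set the VLAN */
			return true;
		send_vlan_request();
		sleep(1);         /* stand-in for wait_for_completion_timeout(1 * HZ) */
	}
	return false;             /* caller falls back to a default VLAN */
}

int main(void)
{
	vlan_id = 1002;           /* simulate a FIP VLAN response arriving */
	printf("valid vlan: %s\n", acquire_vlan(60) ? "yes" : "no");
	return 0;
}
```
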
@@ -153,12 +171,21 @@ static void qedf_handle_link_update(struct work_struct *work)
container_of(work, struct qedf_ctx, link_update.work);
int rc;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
+ atomic_read(&qedf->link_state));
if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
rc = qedf_initiate_fipvlan_req(qedf);
if (rc)
return;
+
+ if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Link is down, resetting vlan_id.\n");
+ qedf->vlan_id = 0;
+ return;
+ }
+
/*
* If we get here then we never received a response to our
* fip vlan request so set the vlan_id to the default and
@@ -185,7 +212,9 @@ static void qedf_handle_link_update(struct work_struct *work)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Calling fcoe_ctlr_link_down().\n");
fcoe_ctlr_link_down(&qedf->ctlr);
- qedf_wait_for_upload(qedf);
+ if (qedf_wait_for_upload(qedf) == false)
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Could not upload all sessions.\n");
/* Reset the number of FIP VLAN retries */
qedf->fipvlan_retries = qedf_fipvlan_retries;
}
@@ -263,6 +292,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
/* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp);
+ qedf->flogi_pending = 0;
}
/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -288,6 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
+ qedf->flogi_pending++;
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+ qedf->flogi_pending = 0;
+ }
+
+ if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ schedule_delayed_work(&qedf->stag_work, 2);
+ return NULL;
+ }
+
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
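
The new `flogi_pending` counter triggers a deferred soft reset (the STAG work) once `QEDF_FLOGI_RETRY_CNT` consecutive FLOGIs go unanswered, rather than retrying forever. A hedged sketch of that thresholding — `FLOGI_RETRY_CNT` and `trigger_soft_reset()` are stand-ins, not driver symbols:

```c
#include <stdbool.h>
#include <stdio.h>

#define FLOGI_RETRY_CNT 3	/* stand-in for QEDF_FLOGI_RETRY_CNT */

static int flogi_pending;

static void trigger_soft_reset(void)
{
	/* stands in for schedule_delayed_work(&qedf->stag_work, 2) */
	puts("scheduling soft context reset");
}

/* Returns true if the FLOGI was sent, false if we reset instead. */
static bool send_flogi(void)
{
	if (++flogi_pending >= FLOGI_RETRY_CNT) {
		trigger_soft_reset();
		return false;
	}
	printf("sending FLOGI, pending=%d\n", flogi_pending);
	return true;
}

int main(void)
{
	while (send_flogi())	/* a FLOGI accept would zero the counter */
		;
	return 0;
}
```
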
@@ -302,8 +344,10 @@ int qedf_send_flogi(struct qedf_ctx *qedf)
lport = qedf->lport;
- if (!lport->tt.elsct_send)
+ if (!lport->tt.elsct_send) {
+ QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
return -EINVAL;
+ }
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
@@ -321,11 +365,6 @@ int qedf_send_flogi(struct qedf_ctx *qedf)
return 0;
}
-struct qedf_tmp_rdata_item {
- struct fc_rport_priv *rdata;
- struct list_head list;
-};
-
/*
* This function is called if link_down_tmo is in use. If we get a link up and
* link_down_tmo has not expired then use just FLOGI/ADISC to recover our
@@ -335,9 +374,8 @@ static void qedf_link_recovery(struct work_struct *work)
{
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, link_recovery.work);
- struct qedf_rport *fcport;
+ struct fc_lport *lport = qedf->lport;
struct fc_rport_priv *rdata;
- struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
bool rc;
int retries = 30;
int rval, i;
@@ -404,38 +442,20 @@ static void qedf_link_recovery(struct work_struct *work)
* Call lport->tt.rport_login which will cause libfc to send an
* ADISC since the rport is in state ready.
*/
- rcu_read_lock();
- list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
- rdata = fcport->rdata;
- if (rdata == NULL)
- continue;
- rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
- GFP_ATOMIC);
- if (!rdata_item)
- continue;
+ mutex_lock(&lport->disc.disc_mutex);
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
- rdata_item->rdata = rdata;
- list_add(&rdata_item->list, &rdata_login_list);
- } else
- kfree(rdata_item);
- }
- rcu_read_unlock();
- /*
- * Do the fc_rport_login outside of the rcu lock so we don't take a
- * mutex in an atomic context.
- */
- list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
- list) {
- list_del(&rdata_item->list);
- fc_rport_login(rdata_item->rdata);
- kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
- kfree(rdata_item);
+ fc_rport_login(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
+ mutex_unlock(&lport->disc.disc_mutex);
}
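
The rework drops the temporary-list dance: holding `disc_mutex` (a sleepable context) lets the loop call `fc_rport_login()` directly, taking a reference only when `kref_get_unless_zero()` confirms the rport is still live. A simplified userspace sketch of that get-unless-zero pattern using C11 atomics (the `obj`, `get_unless_zero`, and `put_ref` names are illustrative):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refs;
};

/* Take a reference only if the object still has one; mirrors
 * kref_get_unless_zero(). */
static bool get_unless_zero(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r != 0)
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	return false;	/* already at zero: object is being torn down */
}

/* Drop a reference; the last putter frees, mirroring fc_rport_destroy(). */
static void put_ref(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		puts("last reference dropped, freeing");
}

int main(void)
{
	struct obj rport = { .refs = 1 };

	if (get_unless_zero(&rport)) {
		puts("safe to use rport");	/* fc_rport_login() would go here */
		put_ref(&rport);
	}
	put_ref(&rport);
	return 0;
}
```
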
static void qedf_update_link_speed(struct qedf_ctx *qedf,
struct qed_link_output *link)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
struct fc_lport *lport = qedf->lport;
lport->link_speed = FC_PORTSPEED_UNKNOWN;
@@ -458,6 +478,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
case 100000:
lport->link_speed = FC_PORTSPEED_100GBIT;
break;
+ case 20000:
+ lport->link_speed = FC_PORTSPEED_20GBIT;
+ break;
default:
lport->link_speed = FC_PORTSPEED_UNKNOWN;
break;
@@ -467,23 +490,105 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
* Set supported link speed by querying the supported
* capabilities of the link.
*/
- if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 10000baseT_Full);
+ phylink_set(sup_caps, 10000baseKX4_Full);
+ phylink_set(sup_caps, 10000baseR_FEC);
+ phylink_set(sup_caps, 10000baseCR_Full);
+ phylink_set(sup_caps, 10000baseSR_Full);
+ phylink_set(sup_caps, 10000baseLR_Full);
+ phylink_set(sup_caps, 10000baseLRM_Full);
+ phylink_set(sup_caps, 10000baseKR_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
- if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 25000baseKR_Full);
+ phylink_set(sup_caps, 25000baseCR_Full);
+ phylink_set(sup_caps, 25000baseSR_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
- if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 40000baseLR4_Full);
+ phylink_set(sup_caps, 40000baseKR4_Full);
+ phylink_set(sup_caps, 40000baseCR4_Full);
+ phylink_set(sup_caps, 40000baseSR4_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
- if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 50000baseKR2_Full);
+ phylink_set(sup_caps, 50000baseCR2_Full);
+ phylink_set(sup_caps, 50000baseSR2_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
- if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 100000baseKR4_Full);
+ phylink_set(sup_caps, 100000baseSR4_Full);
+ phylink_set(sup_caps, 100000baseCR4_Full);
+ phylink_set(sup_caps, 100000baseLR4_ER4_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
- fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+
+ phylink_zero(sup_caps);
+ phylink_set(sup_caps, 20000baseKR2_Full);
+
+ if (linkmode_intersects(link->supported_caps, sup_caps))
+ lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
+ if (lport->host && lport->host->shost_data)
+ fc_host_supported_speeds(lport->host) =
+ lport->link_supported_speeds;
+}
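
`linkmode_intersects()` replaces the old single-flag tests: the driver now builds a candidate link-mode mask per FC speed and checks whether any bit overlaps the adapter's supported mask. A self-contained sketch of that bitmap intersection, assuming fixed 128-bit maps (the kernel sizes these via `__ETHTOOL_DECLARE_LINK_MODE_MASK`):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 128
#define WORDS (NBITS / 64)

static void set_bit64(uint64_t *map, unsigned int bit)
{
	map[bit / 64] |= 1ULL << (bit % 64);
}

/* Two bitmaps intersect if any word ANDs to non-zero. */
static bool intersects(const uint64_t *a, const uint64_t *b)
{
	for (int i = 0; i < WORDS; i++)
		if (a[i] & b[i])
			return true;
	return false;
}

int main(void)
{
	uint64_t supported[WORDS] = {0}, sup_caps[WORDS] = {0};

	set_bit64(supported, 42);	/* e.g. one 10G link mode the HW reports */
	set_bit64(sup_caps, 42);	/* driver's 10G candidate set */
	printf("10G supported: %d\n", intersects(supported, sup_caps));
	return 0;
}
```
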
+
+static void qedf_bw_update(void *dev)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ struct qed_link_output link;
+
+ /* Get the latest status of the link */
+ qed_ops->common->get_link(qedf->cdev, &link);
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore link update, driver getting unload.\n");
+ return;
+ }
+
+ if (link.link_up) {
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ qedf_update_link_speed(qedf, &link);
+ else
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore bw update, link is down.\n");
+
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+ }
}
static void qedf_link_update(void *dev, struct qed_link_output *link)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ /*
+ * Prevent race where we're removing the module and we get link update
+ * for qed.
+ */
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore link update, driver getting unload.\n");
+ return;
+ }
+
if (link->link_up) {
if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
@@ -563,7 +668,7 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
tmp_prio = get->operational.app_prio.fcoe;
if (qedf_default_prio > -1)
qedf->prio = qedf_default_prio;
- else if (tmp_prio < 0 || tmp_prio > 7) {
+ else if (tmp_prio > 7) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"FIP/FCoE prio %d out of range, setting to %d.\n",
tmp_prio, QEDF_DEFAULT_PRIO);
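
The dropped `tmp_prio < 0` arm was dead code: `tmp_prio` holds an unsigned value, so the comparison can never be true (and modern compilers warn about it). A two-line illustration:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tmp_prio = 200;	/* out-of-range DCBX priority */

	/* "if (tmp_prio < 0)" would be always-false for an unsigned type */
	if (tmp_prio > 7)
		printf("prio %u out of range, clamping to default\n", tmp_prio);
	return 0;
}
```
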
@@ -594,11 +699,14 @@ static u32 qedf_get_login_failures(void *cookie)
}
static struct qed_fcoe_cb_ops qedf_cb_ops = {
- {
+ .common = {
.link_update = qedf_link_update,
+ .bw_update = qedf_bw_update,
+ .schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
+ .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
}
};
@@ -615,50 +723,113 @@ static struct scsi_transport_template *qedf_fc_vport_transport_template;
static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
- struct qedf_rport *fcport;
struct fc_lport *lport;
struct qedf_ctx *qedf;
struct qedf_ioreq *io_req;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_rport_priv *rdata;
+ struct qedf_rport *fcport = NULL;
int rc = FAILED;
+ int wait_count = 100;
+ int refcount = 0;
int rval;
-
- if (fc_remote_port_chkready(rport)) {
- QEDF_ERR(NULL, "rport not ready\n");
- goto out;
- }
+ int got_ref = 0;
lport = shost_priv(sc_cmd->device->host);
qedf = (struct qedf_ctx *)lport_priv(lport);
- if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
- QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ fcport = (struct qedf_rport *)&rp[1];
+ rdata = fcport->rdata;
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+ QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
+ rc = SUCCESS;
goto out;
}
- fcport = (struct qedf_rport *)&rp[1];
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ io_req = qedf_priv(sc_cmd)->io_req;
if (!io_req) {
- QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
+ sc_cmd, sc_cmd->cmnd[0],
+ rdata->ids.port_id);
rc = SUCCESS;
- goto out;
+ goto drop_rdata_kref;
}
- QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
- "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
+ rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
+ if (rval)
+ got_ref = 1;
+
+ /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
+ if (!rval || io_req->sc_cmd != sc_cmd) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
+ io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
+
+ goto drop_rdata_kref;
+ }
+
+ if (fc_remote_port_chkready(rport)) {
+ refcount = kref_read(&io_req->refcount);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
+ io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
+ refcount, rdata->ids.port_id);
+
+ goto drop_rdata_kref;
+ }
+
+ rc = fc_block_rport(rport);
+ if (rc)
+ goto drop_rdata_kref;
+
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Connection uploading, xid=0x%x., port_id=%06x\n",
+ io_req->xid, rdata->ids.port_id);
+ while (io_req->sc_cmd && (wait_count != 0)) {
+ msleep(100);
+ wait_count--;
+ }
+ if (wait_count) {
+ QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
+ rc = SUCCESS;
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
+ rc = FAILED;
+ }
+ goto drop_rdata_kref;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
+ goto drop_rdata_kref;
+ }
+
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
+ io_req, sc_cmd, io_req->xid, io_req->fp_idx,
+ rdata->ids.port_id);
if (qedf->stop_io_on_error) {
qedf_stop_all_io(qedf);
rc = SUCCESS;
- goto out;
+ goto drop_rdata_kref;
}
init_completion(&io_req->abts_done);
rval = qedf_initiate_abts(io_req, true);
if (rval) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
- goto out;
+ /*
+ * If we fail to queue the ABTS then return this command to
+ * the SCSI layer as it will own and free the xid
+ */
+ rc = SUCCESS;
+ qedf_scsi_done(qedf, io_req, DID_ERROR);
+ goto drop_rdata_kref;
}
wait_for_completion(&io_req->abts_done);
@@ -684,55 +855,111 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
io_req->xid);
+drop_rdata_kref:
+ kref_put(&rdata->kref, fc_rport_destroy);
out:
+ if (got_ref)
+ kref_put(&io_req->refcount, qedf_release_cmd);
return rc;
}
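
When the connection is mid-upload, the handler now polls up to 100 × 100 ms for the firmware to release the command instead of failing immediately. A minimal sketch of that bounded drain (the `sc_cmd` flag and timings here are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int dummy;
static void *volatile sc_cmd = &dummy;	/* non-NULL while the driver owns it */

static bool drain_cmd(void)
{
	for (int wait_count = 100; wait_count; wait_count--) {
		if (!sc_cmd)
			return true;	/* released in time: report abort success */
		usleep(100 * 1000);	/* msleep(100) equivalent */
	}
	return false;			/* still held after ~10s: abort failed */
}

int main(void)
{
	sc_cmd = NULL;			/* simulate the completion arriving */
	printf("abort %s\n", drain_cmd() ? "succeeded" : "failed");
	return 0;
}
```
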
static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
+ struct scsi_target *starget = scsi_target(sc_cmd->device);
+ struct fc_rport *rport = starget_to_rport(starget);
+
QEDF_ERR(NULL, "TARGET RESET Issued...");
- return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+ return qedf_initiate_tmf(rport, 0, FCP_TMF_TGT_RESET);
}
static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+
QEDF_ERR(NULL, "LUN RESET Issued...\n");
- return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+ return qedf_initiate_tmf(rport, sc_cmd->device->lun, FCP_TMF_LUN_RESET);
}
-void qedf_wait_for_upload(struct qedf_ctx *qedf)
+bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
- while (1) {
+ struct qedf_rport *fcport;
+ int wait_cnt = 120;
+
+ while (wait_cnt--) {
if (atomic_read(&qedf->num_offloads))
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "Waiting for all uploads to complete.\n");
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Waiting for all uploads to complete num_offloads = 0x%x.\n",
+ atomic_read(&qedf->num_offloads));
else
- break;
+ return true;
msleep(500);
}
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+ if (test_bit(QEDF_RPORT_SESSION_READY,
+ &fcport->flags)) {
+ if (fcport->rdata)
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Waiting for fcport %p portid=%06x.\n",
+ fcport, fcport->rdata->ids.port_id);
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Waiting for fcport %p.\n", fcport);
+ }
+ }
+
+ rcu_read_unlock();
+ return false;
}
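
`qedf_wait_for_upload()` now bounds its wait (120 × 500 ms, about a minute) and reports failure instead of spinning forever; callers then log the fcports that never uploaded. A userspace sketch of that conversion:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int num_offloads;	/* sessions still uploading */

static bool wait_for_upload(void)
{
	for (int wait_cnt = 120; wait_cnt; wait_cnt--) {
		if (atomic_load(&num_offloads) == 0)
			return true;
		usleep(500 * 1000);	/* msleep(500) equivalent */
	}
	return false;	/* caller enumerates the stragglers */
}

int main(void)
{
	printf("uploaded: %s\n", wait_for_upload() ? "yes" : "no");
	return 0;
}
```
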
/* Performs soft reset of qedf_ctx by simulating a link down/up */
-static void qedf_ctx_soft_reset(struct fc_lport *lport)
+void qedf_ctx_soft_reset(struct fc_lport *lport)
{
struct qedf_ctx *qedf;
+ struct qed_link_output if_link;
+
+ qedf = lport_priv(lport);
if (lport->vport) {
- QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
- qedf = lport_priv(lport);
-
+ qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Queuing link down work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
- qedf_wait_for_upload(qedf);
+
+ if (qedf_wait_for_upload(qedf) == false) {
+ QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
+ WARN_ON(atomic_read(&qedf->num_offloads));
+ }
+
+ /* Before setting link up query physical link state */
+ qed_ops->common->get_link(qedf->cdev, &if_link);
+ /* Bail if the physical link is not up */
+ if (!if_link.link_up) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Physical link is not up.\n");
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+ return;
+ }
+ /* Flush and wait to make sure link down is processed */
+ flush_delayed_work(&qedf->link_update);
+ msleep(500);
+
atomic_set(&qedf->link_state, QEDF_LINK_UP);
qedf->vlan_id = 0;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Queue link up work.\n");
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
+ clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
}
/* Reset the host by gracefully logging out and then logging back in */
@@ -740,22 +967,6 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
struct fc_lport *lport;
struct qedf_ctx *qedf;
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
- struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
- int rval;
-
- rval = fc_remote_port_chkready(rport);
-
- if (rval) {
- QEDF_ERR(NULL, "device_reset rport not ready\n");
- return FAILED;
- }
-
- if (fcport == NULL) {
- QEDF_ERR(NULL, "device_reset: rport is NULL\n");
- return FAILED;
- }
lport = shost_priv(sc_cmd->device->host);
qedf = lport_priv(lport);
@@ -771,7 +982,8 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
return SUCCESS;
}
-static int qedf_slave_configure(struct scsi_device *sdev)
+static int qedf_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (qedf_queue_depth) {
scsi_change_queue_depth(sdev, qedf_queue_depth);
@@ -780,23 +992,24 @@ static int qedf_slave_configure(struct scsi_device *sdev)
return 0;
}
-static struct scsi_host_template qedf_host_template = {
+static const struct scsi_host_template qedf_host_template = {
.module = THIS_MODULE,
.name = QEDF_MODULE_NAME,
.this_id = -1,
.cmd_per_lun = 32,
.max_sectors = 0xffff,
.queuecommand = qedf_queuecommand,
- .shost_attrs = qedf_host_attrs,
+ .shost_groups = qedf_host_groups,
.eh_abort_handler = qedf_eh_abort,
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
.eh_host_reset_handler = qedf_eh_host_reset,
- .slave_configure = qedf_slave_configure,
+ .sdev_configure = qedf_sdev_configure,
.dma_boundary = QED_HW_DMA_BOUNDARY,
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
.change_queue_depth = scsi_change_queue_depth,
+ .cmd_size = sizeof(struct qedf_cmd_priv),
};
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
@@ -851,9 +1064,8 @@ static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
return rc;
}
-/**
+/*
* qedf_xmit - qedf FCoE frame transmit function
- *
*/
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -868,7 +1080,6 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
u32 crc;
unsigned int hlen, tlen, elen;
int wlen;
- struct fc_stats *stats;
struct fc_lport *tmp_lport;
struct fc_lport *vn_port = NULL;
struct qedf_rport *fcport;
@@ -907,8 +1118,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
"Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
kfree_skb(skb);
rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
- if (rdata)
+ if (rdata) {
rdata->retries = lport->max_rport_retry_count;
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
return -EINVAL;
}
/* End NPIV filtering */
@@ -968,7 +1181,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
} else {
cp = skb_put(skb, tlen);
}
@@ -1014,10 +1227,8 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
hp->fcoe_sof = sof;
/*update tx stats */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* Get VLAN ID from skb for printing purposes */
__vlan_hwaccel_get_tag(skb, &vlan_tci);
@@ -1031,7 +1242,12 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
1, skb->data, skb->len, false);
- qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+ rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+ kfree_skb(skb);
+ return rc;
+ }
return 0;
}
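
Checking `start_xmit()`'s return matters for buffer ownership: on success the lower layer consumes the skb, on failure it stays with the caller, who must free it to avoid a leak. A hedged sketch of that send-or-free rule with plain malloc'd buffers (`lower_xmit` is a stand-in, not the qed API):

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for qed_ops->ll2->start_xmit(): on success (0) it takes
 * ownership of buf; on failure the caller still owns it. */
static int lower_xmit(void *buf)
{
	(void)buf;
	return -1;	/* simulate a transmit failure */
}

static int xmit(void *buf)
{
	int rc = lower_xmit(buf);

	if (rc) {
		fprintf(stderr, "start_xmit failed rc = %d\n", rc);
		free(buf);	/* failure path: we still own the buffer */
		return rc;
	}
	return 0;		/* success: lower layer now owns buf */
}

int main(void)
{
	return xmit(malloc(64)) ? 1 : 0;
}
```
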
@@ -1137,7 +1353,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
- conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+ conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
@@ -1210,6 +1426,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
*/
term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
&term_params_dma, GFP_KERNEL);
+ if (!term_params)
+ return;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
"port_id=%06x.\n", fcport->rdata->ids.port_id);
@@ -1224,6 +1442,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
struct qedf_rport *fcport)
{
+ struct fc_rport_priv *rdata = fcport->rdata;
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
fcport->rdata->ids.port_id);
@@ -1235,9 +1455,10 @@ static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
qedf_free_sq(qedf, fcport);
fcport->rdata = NULL;
fcport->qedf = NULL;
+ kref_put(&rdata->kref, fc_rport_destroy);
}
-/**
+/*
* This event_callback is called after successful completion of libfc
* initiated target login. qedf can proceed with initiating the session
* establishment.
@@ -1310,6 +1531,8 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
break;
}
+ /* Initial reference held on entry, so this can't fail */
+ kref_get(&rdata->kref);
fcport->rdata = rdata;
fcport->rport = rport;
@@ -1357,6 +1580,17 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
if (port_id == FC_FID_DIR_SERV)
break;
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "No action since spp type isn't FCP\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Not FCP target so no action\n");
+ break;
+ }
+
if (!rport) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"port_id=%x - rport notcreated Yet!!\n", port_id);
@@ -1369,11 +1603,15 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
*/
fcport = (struct qedf_rport *)&rp[1];
+ spin_lock_irqsave(&fcport->rport_lock, flags);
/* Only free this fcport if it is offloaded already */
- if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
+ !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+ &fcport->flags)) {
+ set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+ &fcport->flags);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_cleanup_fcport(qedf, fcport);
-
/*
* Remove fcport to list of qedf_ctx list of offloaded
* ports
@@ -1385,8 +1623,9 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
&fcport->flags);
atomic_dec(&qedf->num_offloads);
+ } else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
-
break;
case RPORT_EV_NONE:
@@ -1418,7 +1657,7 @@ static struct libfc_function_template qedf_lport_template = {
static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
{
- fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
+ fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
qedf->ctlr.send = qedf_fip_send;
qedf->ctlr.get_src_addr = qedf_get_src_mac;
@@ -1428,12 +1667,13 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
- struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
u8 buf[8];
- int i, pos;
+ int pos;
+ uint32_t i;
/*
- * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+ * fdmi_enabled needs to be set for libfc
+ * to execute FDMI registration
*/
lport->fdmi_enabled = 1;
@@ -1449,33 +1689,50 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
for (i = 0; i < 8; i++)
pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number),
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X",
buf[7], buf[6], buf[5], buf[4],
buf[3], buf[2], buf[1], buf[0]);
} else
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number), "Unknown");
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "Unknown");
- snprintf(fc_host->manufacturer,
- sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+ snprintf(fc_host_manufacturer(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
- snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+ if (qedf->pdev->device == QL45xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
- snprintf(fc_host->model_description, sizeof(fc_host->model_description),
- "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
- "(FCoE)");
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL45xxx FCoE Adapter");
+ }
+
+ if (qedf->pdev->device == QL41xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
- snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
- "Rev %d", qedf->pdev->revision);
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL41xxx FCoE Adapter");
+ }
- snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
- "%s", QEDF_VERSION);
+ snprintf(fc_host_hardware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
+
+ snprintf(fc_host_driver_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
+
+ snprintf(fc_host_firmware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
+ FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+ FW_ENGINEERING_VERSION);
+
+ snprintf(fc_host_vendor_identifier(lport->host),
+ FC_VENDOR_IDENTIFIER, "%s", "Marvell");
- snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
- "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
}
static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1498,11 +1755,15 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
fc_set_wwnn(lport, qedf->wwnn);
fc_set_wwpn(lport, qedf->wwpn);
- fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+ if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "fcoe_libfc_config failed.\n");
+ return -ENOMEM;
+ }
/* Allocate the exchange manager */
- fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
- qedf->max_els_xid, NULL);
+ fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
+ 0xfffe, NULL);
if (fc_lport_init_stats(lport))
return -ENOMEM;
@@ -1518,8 +1779,13 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
/* Set symbolic node name */
- snprintf(fc_host_symbolic_name(lport->host), 256,
- "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+ if (qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
qedf_setup_fdmi(qedf);
@@ -1577,22 +1843,20 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
"WWPN (0x%s) already exists.\n", buf);
- goto err1;
+ return rc;
}
if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
"because link is not up.\n");
- rc = -EIO;
- goto err1;
+ return -EIO;
}
vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
if (!vn_port) {
QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
"for vport.\n");
- rc = -ENOMEM;
- goto err1;
+ return -ENOMEM;
}
fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1611,12 +1875,13 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
+ INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port);
if (rc) {
QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
"for lport stats.\n");
- goto err2;
+ goto err;
}
fc_set_wwnn(vn_port, vport->node_name);
@@ -1625,15 +1890,17 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->wwpn = vn_port->wwpn;
vn_port->host->transportt = qedf_fc_vport_transport_template;
- vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+ vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
vn_port->host->max_lun = qedf_max_lun;
vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ vn_port->host->max_id = QEDF_MAX_SESSIONS;
rc = scsi_add_host(vn_port->host, &vport->dev);
if (rc) {
- QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
- goto err2;
+ QEDF_WARN(&base_qedf->dbg_ctx,
+ "Error adding Scsi_Host rc=0x%x.\n", rc);
+ goto err;
}
/* Set default dev_loss_tmo based on module parameter */
@@ -1667,6 +1934,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fc_vport_setlink(vn_port);
}
+ /* Set symbolic node name */
+ if (base_qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (base_qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+ /* Set supported speed */
+ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+ /* Set speed */
+ vn_port->link_speed = n_port->link_speed;
+
+ /* Set port type */
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+ /* Set maxframe size */
+ fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
vn_port);
@@ -1674,9 +1962,10 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
-err2:
+ return 0;
+
+err:
scsi_host_put(vn_port->host);
-err1:
return rc;
}
@@ -1717,8 +2006,7 @@ static int qedf_vport_destroy(struct fc_vport *vport)
fc_lport_free_stats(vn_port);
/* Release Scsi_Host */
- if (vn_port->host)
- scsi_host_put(vn_port->host);
+ scsi_host_put(vn_port->host);
out:
return 0;
@@ -1773,6 +2061,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
return 0;
}
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_port_id(shost) = lport->port_id;
+}
+
static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
*shost)
{
@@ -1843,6 +2138,7 @@ static struct fc_function_template qedf_fc_transport_fn = {
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .get_host_port_id = qedf_get_host_port_id,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fc_get_host_speed,
@@ -1909,7 +2205,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -1936,12 +2232,11 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
struct qedf_io_work *io_work;
- int num_handled = 0;
unsigned int cpu;
struct qedf_ioreq *io_req = NULL;
u16 xid;
@@ -1964,7 +2259,6 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
while (new_cqes) {
fp->completions++;
- num_handled++;
cqe = &que->cq[que->cq_cons_idx];
comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
@@ -1993,7 +2287,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
* on.
*/
if (!io_req)
- /* If there is not io_req assocated with this CQE
+ /* If there is no io_req associated with this CQE
* just queue it on CPU 0
*/
cpu = 0;
@@ -2086,16 +2380,21 @@ static void qedf_simd_int_handler(void *cookie)
static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
{
int i;
+ u16 vector_idx = 0;
+ u32 vector;
if (qedf->int_info.msix_cnt) {
for (i = 0; i < qedf->int_info.used_cnt; i++) {
- synchronize_irq(qedf->int_info.msix[i].vector);
- irq_set_affinity_hint(qedf->int_info.msix[i].vector,
- NULL);
- irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
- NULL);
- free_irq(qedf->int_info.msix[i].vector,
- &qedf->fp_array[i]);
+ vector_idx = i * qedf->dev_info.common.num_hwfns +
+ qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Freeing IRQ #%d vector_idx=%d.\n",
+ i, vector_idx);
+ vector = qedf->int_info.msix[vector_idx].vector;
+ synchronize_irq(vector);
+ irq_set_affinity_hint(vector, NULL);
+ irq_set_affinity_notifier(vector, NULL);
+ free_irq(vector, &qedf->fp_array[i]);
}
} else
qed_ops->common->simd_handler_clean(qedf->cdev,
@@ -2108,11 +2407,19 @@ static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
static int qedf_request_msix_irq(struct qedf_ctx *qedf)
{
int i, rc, cpu;
+ u16 vector_idx = 0;
+ u32 vector;
cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < qedf->num_queues; i++) {
- rc = request_irq(qedf->int_info.msix[i].vector,
- qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
+ vector_idx = i * qedf->dev_info.common.num_hwfns +
+ qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Requesting IRQ #%d vector_idx=%d.\n",
+ i, vector_idx);
+ vector = qedf->int_info.msix[vector_idx].vector;
+ rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
+ &qedf->fp_array[i]);
if (rc) {
QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
@@ -2121,8 +2428,7 @@ static int qedf_request_msix_irq(struct qedf_ctx *qedf)
}
qedf->int_info.used_cnt++;
- rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
- get_cpu_mask(cpu));
+ rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
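
Both interrupt paths now map queue `i` to MSI-X entry `i * num_hwfns + affin_hwfn_idx`, so on devices exposing more than one hardware function each queue lands on the vector belonging to the affinitized function. A worked example with assumed values:

```c
#include <stdio.h>

int main(void)
{
	int num_hwfns = 2, affin_hwfn_idx = 1;	/* assumed example values */

	/* with 2 hwfns and affinity to hwfn 1, queue i uses entry 2*i+1 */
	for (int i = 0; i < 4; i++)
		printf("queue %d -> msix entry %d\n",
		       i, i * num_hwfns + affin_hwfn_idx);
	return 0;
}
```
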
@@ -2155,7 +2461,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
qedf->int_info.used_cnt = 1;
- QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Cannot load driver due to a lack of MSI-X vectors.\n");
return -EINVAL;
}
@@ -2198,12 +2505,14 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
fr_dev(fp) = lport;
fr_sof(fp) = hp->fcoe_sof;
if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
kfree_skb(skb);
return;
}
fr_eof(fp) = crc_eof.fcoe_eof;
fr_crc(fp) = crc_eof.fcoe_crc32;
if (pskb_trim(skb, fr_len)) {
+ QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
kfree_skb(skb);
return;
}
@@ -2264,9 +2573,9 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
* empty then this is not addressed to our port so simply drop it.
*/
if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
- lport->port_id, ntoh24(fh->fh_d_id));
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
+ lport->port_id, ntoh24(fh->fh_d_id));
kfree_skb(skb);
return;
}
@@ -2275,6 +2584,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
(f_ctl & FC_FC_EX_CTX)) {
/* Drop incoming ABTS response that has both SEQ/EX CTX set */
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping ABTS response as both SEQ/EX CTX set.\n");
kfree_skb(skb);
return;
}
@@ -2356,6 +2667,13 @@ static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
struct qedf_skb_work *skb_work;
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping frame as link state is down.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
if (!skb_work) {
QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
@@ -2403,16 +2721,17 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
- QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
- "for id = %d.\n", sb_id);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Status block allocation failed for id = %d.\n",
+ sb_id);
return -ENOMEM;
}
@@ -2420,8 +2739,10 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
sb_id, QED_SB_TYPE_STORAGE);
if (ret) {
- QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
- "failed for id = %d.\n", sb_id);
+ dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Status block initialization failed (0x%x) for id = %d.\n",
+ ret, sb_id);
return ret;
}
@@ -2497,6 +2818,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
struct qedf_ioreq *io_req;
struct qedf_rport *fcport;
u32 comp_type;
+ u8 io_comp_type;
+ unsigned long flags;
comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
FCOE_CQE_CQE_TYPE_MASK;
@@ -2505,13 +2828,18 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
io_req = &qedf->cmd_mgr->cmds[xid];
/* Completion not for a valid I/O anymore so just return */
- if (!io_req)
+ if (!io_req) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "io_req is NULL for xid=0x%x.\n", xid);
return;
+ }
fcport = io_req->fcport;
if (fcport == NULL) {
- QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "fcport is NULL for xid=0x%x io_req=%p.\n",
+ xid, io_req);
return;
}
@@ -2520,15 +2848,19 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
* isn't valid and shouldn't be taken. We should just return.
*/
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Session not offloaded yet, fcport = %p.\n", fcport);
return;
}
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ io_comp_type = io_req->cmd_type;
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
switch (comp_type) {
case FCOE_GOOD_COMPLETION_CQE_TYPE:
atomic_inc(&fcport->free_sqes);
- switch (io_req->cmd_type) {
+ switch (io_comp_type) {
case QEDF_SCSI_CMD:
qedf_scsi_completion(qedf, cqe, io_req);
break;
@@ -2636,7 +2968,6 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
int i;
struct scsi_bd *pbl;
u64 *list;
- dma_addr_t page;
/* Alloc dma memory for BDQ buffers */
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
@@ -2697,11 +3028,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
QEDF_PAGE_SIZE;
list = (u64 *)qedf->bdq_pbl_list;
- page = qedf->bdq_pbl_list_dma;
for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
*list = qedf->bdq_pbl_dma;
list++;
- page += QEDF_PAGE_SIZE;
}
return 0;
@@ -2711,7 +3040,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
{
u32 *list;
int i;
- int status = 0, rc;
+ int status;
u32 *pbl;
dma_addr_t page;
int num_pages;
@@ -2723,7 +3052,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
*/
if (!qedf->num_queues) {
QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
- return 1;
+ return -ENOMEM;
}
/*
@@ -2731,8 +3060,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = 1;
- goto mem_alloc_failure;
+ QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
+ return -EINVAL;
}
qedf->global_queues = kzalloc((sizeof(struct global_queue *)
@@ -2746,9 +3075,11 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
"qedf->global_queues=%p.\n", qedf->global_queues);
/* Allocate DMA coherent buffers for BDQ */
- rc = qedf_alloc_bdq(qedf);
- if (rc)
+ status = qedf_alloc_bdq(qedf);
+ if (status) {
+ QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
goto mem_alloc_failure;
+ }
/* Allocate a CQ and an associated PBL for each MSI-X vector */
for (i = 0; i < qedf->num_queues; i++) {
@@ -2958,13 +3289,15 @@ static struct pci_driver qedf_pci_driver = {
.id_table = qedf_pci_tbl,
.probe = qedf_probe,
.remove = qedf_remove,
+ .shutdown = qedf_shutdown,
+ .suspend = qedf_suspend,
};
static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -2974,12 +3307,16 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
void *task_start, *task_end;
struct qed_slowpath_params slowpath_params;
struct qed_probe_params qed_params;
- u16 tmp;
+ u16 retry_cnt = 10;
/*
* When doing error recovery we didn't reap the lport so don't try
* to reallocate it.
*/
+retry_probe:
+ if (mode == QEDF_MODE_RECOVERY)
+ msleep(2000);
+
if (mode != QEDF_MODE_RECOVERY) {
lport = libfc_host_alloc(&qedf_host_template,
sizeof(struct qedf_ctx));
@@ -2990,8 +3327,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err0;
}
+ fc_disc_init(lport);
+
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3005,6 +3345,8 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
pci_set_drvdata(pdev, qedf);
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
+ mutex_init(&qedf->flush_mutex);
+ qedf->flogi_pending = 0;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3014,9 +3356,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3029,12 +3374,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
qedf->io_mempool);
- sprintf(host_buf, "qedf_%u_link",
- qedf->lport->host->host_no);
- qedf->link_update_wq = create_workqueue(host_buf);
+ qedf->link_update_wq = alloc_workqueue("qedf_%u_link",
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 1, qedf->lport->host->host_no);
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+ INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
qedf->fipvlan_retries = qedf_fipvlan_retries;
/* Set a default prio in case DCBX doesn't converge */
if (qedf_default_prio > -1) {
@@ -3057,6 +3403,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
if (!qedf->cdev) {
+ if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Retry %d initialize hardware\n", retry_cnt);
+ retry_cnt--;
+ goto retry_probe;
+ }
+ QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
rc = -ENODEV;
goto err1;
}
@@ -3068,6 +3421,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
goto err1;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+ qedf->dev_info.common.num_hwfns,
+ qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
+
/* queue allocation code should come here
* order should be
* slowpath_start
@@ -3083,6 +3441,23 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+ goto err2;
+ }
+
+ if (mode != QEDF_MODE_RECOVERY) {
+ qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
+ if (IS_ERR(qedf->devlink)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
+ rc = PTR_ERR(qedf->devlink);
+ qedf->devlink = NULL;
+ goto err2;
+ }
+ }
+
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3100,12 +3475,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
/* Start the Slowpath-process */
+ memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params));
slowpath_params.int_mode = QED_INT_MODE_MSIX;
slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
- strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
+ strscpy(slowpath_params.name, "qedf", sizeof(slowpath_params.name));
rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
@@ -3120,8 +3496,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Setup interrupts */
rc = qedf_setup_int(qedf);
- if (rc)
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
goto err3;
+ }
rc = qed_ops->start(qedf->cdev, &qedf->tasks);
if (rc) {
@@ -3144,9 +3522,9 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
"Writing %d to primary and secondary BDQ doorbell registers.\n",
qedf->bdq_prod_idx);
writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
- tmp = readw(qedf->bdq_primary_prod);
+ readw(qedf->bdq_primary_prod);
writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
- tmp = readw(qedf->bdq_secondary_prod);
+ readw(qedf->bdq_secondary_prod);
qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
@@ -3181,11 +3559,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
sprintf(host_buf, "host_%d", host->host_no);
qed_ops->common->set_name(qedf->cdev, host_buf);
-
- /* Set xid max values */
- qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
- qedf->max_els_xid = QEDF_MAX_ELS_XID;
-
/* Allocate cmd mgr */
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
@@ -3196,22 +3569,26 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (mode != QEDF_MODE_RECOVERY) {
host->transportt = qedf_fc_transport_template;
- host->can_queue = QEDF_MAX_ELS_XID;
host->max_lun = qedf_max_lun;
host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ host->max_id = QEDF_MAX_SESSIONS;
+ host->can_queue = FCOE_PARAMS_NUM_TASKS;
rc = scsi_add_host(host, &pdev->dev);
- if (rc)
+ if (rc) {
+ QEDF_WARN(&qedf->dbg_ctx,
+ "Error adding Scsi_Host rc=0x%x.\n", rc);
goto err6;
+ }
}
memset(&params, 0, sizeof(params));
- params.mtu = 9000;
+ params.mtu = QEDF_LL2_BUF_SIZE;
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
- snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
- qedf->ll2_recv_wq =
- create_workqueue(host_buf);
+ qedf->ll2_recv_wq = alloc_workqueue("qedf_%d_ll2",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ host->host_no);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
rc = -ENOMEM;
@@ -3252,9 +3629,9 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
}
- sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
- qedf->timer_work_queue =
- create_workqueue(host_buf);
+ qedf->timer_work_queue = alloc_workqueue("qedf_%u_timer",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ qedf->lport->host->host_no);
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
@@ -3266,8 +3643,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (mode != QEDF_MODE_RECOVERY) {
sprintf(host_buf, "qedf_%u_dpc",
qedf->lport->host->host_no);
- qedf->dpc_wq = create_workqueue(host_buf);
+ qedf->dpc_wq =
+ alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ host_buf);
}
+ INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3315,6 +3695,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3352,6 +3736,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
{
struct qedf_ctx *qedf;
int rc;
+ int cnt = 0;
if (!pdev) {
QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3369,6 +3754,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
return;
}
+stag_in_prog:
+ if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt);
+ cnt++;
+
+ if (cnt < 5) {
+ msleep(500);
+ goto stag_in_prog;
+ }
+ }
+
if (mode != QEDF_MODE_RECOVERY)
set_bit(QEDF_UNLOADING, &qedf->flags);
@@ -3377,7 +3773,9 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
fcoe_ctlr_link_down(&qedf->ctlr);
else
fc_fabric_logoff(qedf->lport);
- qedf_wait_for_upload(qedf);
+
+ if (!qedf_wait_for_upload(qedf))
+ QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
#ifdef CONFIG_DEBUG_FS
qedf_dbg_host_exit(&(qedf->dbg_ctx));
@@ -3452,6 +3850,11 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
QEDF_ERR(&(qedf->dbg_ctx),
"Failed to send drv state to MFW.\n");
+ if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
+ qed_ops->common->devlink_unregister(qedf->devlink);
+ qedf->devlink = NULL;
+ }
+
qed_ops->common->slowpath_stop(qedf->cdev);
qed_ops->common->remove(qedf->cdev);
@@ -3480,6 +3883,45 @@ void qedf_wq_grcdump(struct work_struct *work)
qedf_capture_grc_dump(qedf);
}
+void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Hardware error handler scheduled, event=%d.\n",
+ err_type);
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Already in recovery, not scheduling board disable work.\n");
+ return;
+ }
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedf->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+ break;
+ case QED_HW_ERR_RAMROD_FAIL:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+
+ if (qedf_enable_recovery && qedf->devlink)
+ qed_ops->common->report_fatal_error(qedf->devlink,
+ err_type);
+
+ break;
+ default:
+ break;
+ }
+}
+
/*
* Protocol TLV handler
*/
@@ -3487,11 +3929,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
@@ -3562,6 +4018,91 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, stag_work.work);
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Already is in recovery, hence not calling software context reset.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n");
+ return;
+ }
+
+ set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags);
+
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
+ qedf_ctx_soft_reset(qedf->lport);
+}
+
+static void qedf_shutdown(struct pci_dev *pdev)
+{
+ __qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct qedf_ctx *qedf;
+
+ if (!pdev) {
+ QEDF_ERR(NULL, "pdev is NULL.\n");
+ return -ENODEV;
+ }
+
+ qedf = pci_get_drvdata(pdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+ return -EPERM;
+}
+
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+ schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, recovery_work.work);
+
+ if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+ return;
+
+ /*
+ * Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qed_ops->common->recovery_prolog(qedf->cdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+ __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+ /*
+ * Reset link and dcbx to down state since we will not get a link down
+ * event from the MFW but calling __qedf_remove will essentially be a
+ * link down event.
+ */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+ clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
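
`test_and_set_bit(QEDF_IN_RECOVERY, ...)` makes the recovery worker idempotent: a second schedule while one run is in flight returns immediately. A minimal sketch of the same guard with a C11 atomic flag — the `puts()` calls stand in for the remove/probe pair, and the names are illustrative:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag in_recovery = ATOMIC_FLAG_INIT;

static void recover(void)
{
	if (atomic_flag_test_and_set(&in_recovery))
		return;			/* recovery already in progress */

	puts("teardown");		/* stands in for __qedf_remove(RECOVERY) */
	puts("re-probe");		/* stands in for __qedf_probe(RECOVERY) */

	atomic_flag_clear(&in_recovery);
}

int main(void)
{
	recover();
	return 0;
}
```
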
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
@@ -3640,7 +4181,8 @@ static int __init qedf_init(void)
goto err3;
}
- qedf_io_wq = create_workqueue("qedf_io_wq");
+ qedf_io_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 1,
+ "qedf_io_wq");
if (!qedf_io_wq) {
QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
goto err4;
@@ -3688,7 +4230,7 @@ static void __exit qedf_cleanup(void)
}
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDF_VERSION);
module_init(qedf_init);