Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
 drivers/scsi/ibmvscsi/ibmvfc.c | 2642 ++++++++++++++++++++++++++++----------
 1 file changed, 2111 insertions(+), 531 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index cc4e05be8d4a..228daffb286d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
*
* Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
*
* Copyright (C) IBM Corporation, 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -27,6 +13,7 @@
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
@@ -50,10 +37,18 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 max_sectors = IBMVFC_MAX_SECTORS;
+static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
+static unsigned int mq_enabled = IBMVFC_MQ;
+static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
+static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
+static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
+static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
+
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
@@ -63,6 +58,22 @@ MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+module_param_named(mq, mq_enabled, uint, S_IRUGO);
+MODULE_PARM_DESC(mq, "Enable multiqueue support. "
+ "[Default=" __stringify(IBMVFC_MQ) "]");
+module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
+ "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
+module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(scsi_hw_channels, "Number of HW SCSI channels to request. "
+ "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
+module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
+MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
+ "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
+module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to a system with fewer channels. "
+ "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
+
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
"[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
@@ -73,6 +84,12 @@ MODULE_PARM_DESC(default_timeout,
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
+MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
+ "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
+module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
+MODULE_PARM_DESC(scsi_qdepth, "Maximum SCSI command depth per adapter queue. "
+ "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
"[Default=" __stringify(IBMVFC_MAX_LUN) "]");
@@ -139,6 +156,7 @@ static const struct {
{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+ { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -146,9 +164,53 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
static const char *unknown_error = "unknown error";
+static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
+ unsigned long length, unsigned long *cookie,
+ unsigned long *irq)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
+ *cookie = retbuf[0];
+ *irq = retbuf[1];
+
+ return rc;
+}
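/*
 * Annotation (illustrative sketch, not part of the patch): the cookie and
 * hardware IRQ that h_reg_sub_crq() returns are what a sub-CRQ setup path
 * would hand to the IRQ layer. The field name hwirq and the handler name
 * below are assumptions for the sketch; irq_create_mapping() is the reason
 * this patch adds the <linux/irqdomain.h> include:
 *
 *	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
 *			   &scrq->vios_cookie, &hwirq);
 *	if (!rc) {
 *		scrq->irq = irq_create_mapping(NULL, hwirq);
 *		rc = request_irq(scrq->irq, example_scsi_irq_handler, 0,
 *				 "ibmvfc", scrq);
 *	}
 */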
+
+static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
+{
+ u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
+
+ return (host_caps & cap_flags) ? 1 : 0;
+}
+
+static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
+ struct ibmvfc_cmd *vfc_cmd)
+{
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ return &vfc_cmd->v2.iu;
+ else
+ return &vfc_cmd->v1.iu;
+}
+
+static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
+ struct ibmvfc_cmd *vfc_cmd)
+{
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ return &vfc_cmd->v2.rsp;
+ else
+ return &vfc_cmd->v1.rsp;
+}
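/*
 * Annotation: the two accessors above select between the v1 and v2 payload
 * layouts of struct ibmvfc_cmd based on the IBMVFC_HANDLE_VF_WWPN capability
 * negotiated at NPIV login. Approximate shape, as a sketch only (the
 * authoritative definition lives in ibmvfc.h, not in this diff):
 *
 *	struct ibmvfc_cmd {
 *		...common header, flags, cancel_key, target_wwpn...
 *		union {
 *			struct {
 *				struct ibmvfc_fcp_cmd_iu iu;
 *				struct ibmvfc_fcp_rsp rsp;
 *			} v1;
 *			struct {
 *				...v2-only fields...
 *				struct ibmvfc_fcp_cmd_iu iu;
 *				struct ibmvfc_fcp_rsp rsp;
 *			} v2;
 *		};
 *	};
 */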
+
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_trc_start - Log a start trace entry
@@ -160,9 +222,11 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
struct ibmvfc_trace_entry *entry;
+ int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
- entry = &vhost->trace[vhost->trace_index++];
+ entry = &vhost->trace[index];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
@@ -170,18 +234,18 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
- entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->op_code = iu->cdb[0];
entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
- entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
- entry->tmf_flags = vfc_cmd->iu.tmf_flags;
- entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
+ entry->lun = scsilun_to_int(&iu->lun);
+ entry->tmf_flags = iu->tmf_flags;
+ entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = be32_to_cpu(mad->opcode);
break;
default:
break;
- };
+ }
}
/**
@@ -194,8 +258,12 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
- struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
+ struct ibmvfc_trace_entry *entry;
+ int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
+ entry = &vhost->trace[index];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
@@ -203,15 +271,15 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
- entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->op_code = iu->cdb[0];
entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
- entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
- entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->lun = scsilun_to_int(&iu->lun);
+ entry->tmf_flags = iu->tmf_flags;
entry->u.end.status = be16_to_cpu(vfc_cmd->status);
entry->u.end.error = be16_to_cpu(vfc_cmd->error);
- entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
- entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
- entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+ entry->u.end.fcp_rsp_flags = rsp->flags;
+ entry->u.end.rsp_code = rsp->data.info.rsp_code;
+ entry->u.end.scsi_status = rsp->scsi_status;
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = be32_to_cpu(mad->opcode);
@@ -220,7 +288,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;
- };
+ }
}
#else
@@ -266,15 +334,16 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
/**
* ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vhost: ibmvfc host struct
* @vfc_cmd: ibmvfc command struct
*
* Return value:
* SCSI result value to return for completed command
**/
-static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
int err;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
@@ -426,22 +495,59 @@ static const char *ibmvfc_get_fc_type(u16 status)
* @tgt: ibmvfc target struct
* @action: action to perform
*
+ * Returns:
+ * 0 if action changed / non-zero if not changed
**/
-static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
enum ibmvfc_target_action action)
{
+ int rc = -EINVAL;
+
switch (tgt->action) {
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
+ action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
+ action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
case IBMVFC_TGT_ACTION_DEL_RPORT:
- if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
+ if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
tgt->action = action;
+ rc = 0;
+ }
+ break;
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
- if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
- tgt->add_rport = 0;
tgt->action = action;
+ rc = 0;
break;
}
+
+ if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ tgt->add_rport = 0;
+
+ return rc;
}
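/*
 * Annotation: transition table implemented by the switch above, shown only
 * as an aid (state -> states it may move to):
 *
 *	LOGOUT_RPORT         -> LOGOUT_RPORT_WAIT, DEL_RPORT
 *	LOGOUT_RPORT_WAIT    -> DEL_RPORT, DEL_AND_LOGOUT_RPORT
 *	LOGOUT_DELETED_RPORT -> LOGOUT_RPORT
 *	DEL_AND_LOGOUT_RPORT -> LOGOUT_DELETED_RPORT
 *	DEL_RPORT            -> DELETED_RPORT
 *	DELETED_RPORT        -> (terminal)
 *	all other states     -> any requested action
 *
 * Any action at or beyond LOGOUT_RPORT also clears tgt->add_rport, whether
 * or not the transition itself was accepted.
 */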
/**
@@ -464,7 +570,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
- };
+ }
return rc;
}
@@ -500,14 +606,23 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
+ case IBMVFC_HOST_ACTION_REENABLE:
+ case IBMVFC_HOST_ACTION_RESET:
+ vhost->action = action;
+ break;
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_LOGO:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ case IBMVFC_HOST_ACTION_NONE:
+ default:
switch (vhost->action) {
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
@@ -515,18 +630,9 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
- break;
- case IBMVFC_HOST_ACTION_LOGO:
- case IBMVFC_HOST_ACTION_QUERY_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
- case IBMVFC_HOST_ACTION_NONE:
- case IBMVFC_HOST_ACTION_RESET:
- case IBMVFC_HOST_ACTION_REENABLE:
- default:
- vhost->action = action;
+ }
break;
- };
+ }
}
/**
@@ -538,7 +644,8 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
**/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
- if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+ if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
+ vhost->state == IBMVFC_ACTIVE) {
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
@@ -550,6 +657,19 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_del_tgt - Schedule cleanup and removal of the target
+ * @tgt: ibmvfc target struct
+ **/
+static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
+{
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
+ tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+ tgt->init_retries = 0;
+ }
+ wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
* ibmvfc_link_down - Handle a link down event from the adapter
* @vhost: ibmvfc host struct
* @state: ibmvfc host state to enter
@@ -563,7 +683,7 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost,
ENTER;
scsi_block_requests(vhost->host);
list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_set_host_state(vhost, state);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
@@ -592,11 +712,16 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
}
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
- memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+ memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
- list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (vhost->client_migrated)
+ tgt->need_login = 1;
+ else
+ ibmvfc_del_tgt(tgt);
+ }
+
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -619,6 +744,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
+static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
+ u64 word2, u64 word3, u64 word4)
+{
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+ return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
+ word1, word2, word3, word4);
+}
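/*
 * Annotation: the cookie returned by h_reg_sub_crq() earlier in this diff
 * is the value later passed here as evt->queue->vios_cookie by
 * ibmvfc_send_event(), so each message lands on the hypervisor sub-queue it
 * was registered against.
 */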
+
/**
* ibmvfc_send_crq_init - Send a CRQ init message
* @vhost: ibmvfc host struct
@@ -646,6 +780,115 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+ ENTER;
+ if (!queue->total_depth)
+ return 0;
+
+ pool->size = queue->total_depth;
+ pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+ if (!pool->events)
+ return -ENOMEM;
+
+ pool->iu_storage = dma_alloc_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ &pool->iu_token, 0);
+
+ if (!pool->iu_storage) {
+ kfree(pool->events);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&queue->sent);
+ INIT_LIST_HEAD(&queue->free);
+ queue->evt_free = queue->evt_depth;
+ queue->reserved_free = queue->reserved_depth;
+ spin_lock_init(&queue->l_lock);
+
+ for (i = 0; i < pool->size; ++i) {
+ struct ibmvfc_event *evt = &pool->events[i];
+
+ /*
+ * evt->active states
+ * 1 = in flight
+ * 0 = being completed
+ * -1 = free/freed
+ */
+ atomic_set(&evt->active, -1);
+ atomic_set(&evt->free, 1);
+ evt->crq.valid = 0x80;
+ evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
+ evt->xfer_iu = pool->iu_storage + i;
+ evt->vhost = vhost;
+ evt->queue = queue;
+ evt->ext_list = NULL;
+ list_add_tail(&evt->queue_list, &queue->free);
+ }
+
+ LEAVE;
+ return 0;
+}
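/*
 * Annotation: the pool now carries two free counters. Assuming the queue
 * allocation code (not in this hunk) sizes total_depth as
 * evt_depth + reserved_depth, ordinary SCSI commands draw from evt_free
 * while reserved management commands draw from reserved_free, so an I/O
 * burst cannot starve error recovery of an event.
 */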
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost: ibmvfc host that owns the event pool
+ * @queue: ibmvfc queue struct
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+ ENTER;
+ for (i = 0; i < pool->size; ++i) {
+ list_del(&pool->events[i].queue_list);
+ BUG_ON(atomic_read(&pool->events[i].free) != 1);
+ if (pool->events[i].ext_list)
+ dma_pool_free(vhost->sg_pool,
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+
+ kfree(pool->events);
+ dma_free_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ pool->iu_storage, pool->iu_token);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_free_queue - Deallocate queue
+ * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue struct
+ *
+ * Unmaps dma and deallocates page for messages
+ **/
+static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ struct device *dev = vhost->dev;
+
+ dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)queue->msgs.handle);
+ queue->msgs.handle = NULL;
+
+ ibmvfc_free_event_pool(vhost, queue);
+}
+
+/**
* ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
* @vhost: ibmvfc host struct
*
@@ -656,7 +899,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
long rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
@@ -669,8 +912,8 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
- dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)crq->msgs);
+
+ ibmvfc_free_queue(vhost, crq);
}
/**
@@ -684,6 +927,9 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ unsigned long flags;
+
+ ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
/* Re-enable the CRQ */
do {
@@ -695,6 +941,15 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
if (rc)
dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
+ vhost->do_enquiry = 1;
+ vhost->using_channels = 0;
+ spin_unlock(vhost->crq.q_lock);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
+
return rc;
}
@@ -710,7 +965,9 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
int rc = 0;
unsigned long flags;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
+
+ ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
/* Close the CRQ */
do {
@@ -720,11 +977,14 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
+ vhost->do_enquiry = 1;
+ vhost->using_channels = 0;
/* Clean out the queue */
- memset(crq->msgs, 0, PAGE_SIZE);
+ memset(crq->msgs.crq, 0, PAGE_SIZE);
crq->cur = 0;
/* And re-open it again */
@@ -736,8 +996,12 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
dev_warn(vhost->dev, "Partner adapter not ready\n");
else if (rc != 0)
dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+
+ spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
+
return rc;
}
@@ -767,12 +1031,24 @@ static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
**/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
- struct ibmvfc_host *vhost = evt->vhost;
- struct ibmvfc_event_pool *pool = &vhost->pool;
+ struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
+ unsigned long flags;
BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1);
- list_add_tail(&evt->queue, &vhost->free);
+ BUG_ON(atomic_dec_and_test(&evt->active));
+
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->free);
+ if (evt->reserved) {
+ evt->reserved = 0;
+ evt->queue->reserved_free++;
+ } else {
+ evt->queue->evt_free++;
+ }
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}
/**
@@ -788,16 +1064,31 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
if (cmnd) {
scsi_dma_unmap(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
- if (evt->eh_comp)
- complete(evt->eh_comp);
-
ibmvfc_free_event(evt);
}
/**
+ * ibmvfc_complete_purge - Complete failed command list
+ * @purge_list: list head of failed commands
+ *
+ * This function runs completions on commands to fail as a result of a
+ * host reset or platform migration.
+ **/
+static void ibmvfc_complete_purge(struct list_head *purge_list)
+{
+ struct ibmvfc_event *evt, *pos;
+
+ list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
+/**
* ibmvfc_fail_request - Fail request with specified error code
* @evt: ibmvfc event struct
* @error_code: error code to fail request with
@@ -807,16 +1098,19 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
**/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
+ /*
+ * Anything we are failing should still be active. Otherwise, it
+ * implies we already got a response for the command and are doing
+ * something bad like double completing it.
+ */
+ BUG_ON(!atomic_dec_and_test(&evt->active));
if (evt->cmnd) {
evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done;
} else
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
- list_del(&evt->queue);
- del_timer(&evt->timer);
- ibmvfc_trc_end(evt);
- evt->done(evt);
+ timer_delete(&evt->timer);
}
/**
@@ -830,10 +1124,30 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
struct ibmvfc_event *evt, *pos;
+ struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
+ unsigned long flags;
+ int hwqs = 0;
+ int i;
+
+ if (vhost->using_channels)
+ hwqs = vhost->scsi_scrqs.active_queues;
ibmvfc_dbg(vhost, "Purging all requests\n");
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+ spin_lock_irqsave(&vhost->crq.l_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
ibmvfc_fail_request(evt, error_code);
+ list_splice_init(&vhost->crq.sent, &vhost->purge);
+ spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+
+ for (i = 0; i < hwqs; i++) {
+ spin_lock_irqsave(queues[i].q_lock, flags);
+ spin_lock(&queues[i].l_lock);
+ list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
+ ibmvfc_fail_request(evt, error_code);
+ list_splice_init(&queues[i].sent, &vhost->purge);
+ spin_unlock(&queues[i].l_lock);
+ spin_unlock_irqrestore(queues[i].q_lock, flags);
+ }
}
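/*
 * Annotation on the purge flow as restructured above:
 * ibmvfc_fail_request() only marks the event failed and deletes its timer;
 * ibmvfc_purge_requests() then splices the per-queue sent lists onto
 * vhost->purge under the queue locks; ibmvfc_complete_purge() later walks
 * that list and runs the completions with no queue lock held.
 */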
/**
@@ -1137,6 +1451,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
/**
* ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost: ibmvfc host struct
*
* Return value:
* none
@@ -1153,7 +1468,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (name)
- strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
+ strscpy(vhost->partition_name, name, sizeof(vhost->partition_name));
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
vhost->partition_number = *num;
@@ -1170,13 +1485,20 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+ struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct device_node *of_node = vhost->dev->of_node;
const char *location;
+ u16 max_cmds;
+
+ max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+ if (mq_enabled)
+ max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
+ vhost->scsi_scrqs.desired_queues;
memset(login_info, 0, sizeof(*login_info));
login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
- login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+ login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
login_info->partition_num = cpu_to_be32(vhost->partition_number);
@@ -1186,102 +1508,75 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
if (vhost->client_migrated)
login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
- login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
- login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
+ login_info->max_cmds = cpu_to_be32(max_cmds);
+ login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
+
+ if (vhost->mq_enabled || vhost->using_channels)
+ login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
+
login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
- login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
- strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
- strncpy(login_info->device_name,
- dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
+ login_info->async.len = cpu_to_be32(async_crq->size *
+ sizeof(*async_crq->msgs.async));
+ strscpy(login_info->partition_name, vhost->partition_name,
+ sizeof(login_info->partition_name));
+
+ strscpy(login_info->device_name,
+ dev_name(&vhost->host->shost_gendev), sizeof(login_info->device_name));
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(vhost->dev);
- strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
+ strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
}
/**
- * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
- * @vhost: ibmvfc host who owns the event pool
+ * __ibmvfc_get_event - Gets the next free event in pool
+ * @queue: ibmvfc queue struct
+ * @reserved: event is for a reserved management command
*
- * Returns zero on success.
+ * Returns a free event from the pool.
**/
-static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
+static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
{
- int i;
- struct ibmvfc_event_pool *pool = &vhost->pool;
-
- ENTER;
- pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
- pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
- if (!pool->events)
- return -ENOMEM;
-
- pool->iu_storage = dma_alloc_coherent(vhost->dev,
- pool->size * sizeof(*pool->iu_storage),
- &pool->iu_token, 0);
-
- if (!pool->iu_storage) {
- kfree(pool->events);
- return -ENOMEM;
- }
+ struct ibmvfc_event *evt = NULL;
+ unsigned long flags;
- for (i = 0; i < pool->size; ++i) {
- struct ibmvfc_event *evt = &pool->events[i];
- atomic_set(&evt->free, 1);
- evt->crq.valid = 0x80;
- evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
- evt->xfer_iu = pool->iu_storage + i;
- evt->vhost = vhost;
- evt->ext_list = NULL;
- list_add_tail(&evt->queue, &vhost->free);
+ spin_lock_irqsave(&queue->l_lock, flags);
+ if (reserved && queue->reserved_free) {
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ evt->reserved = 1;
+ queue->reserved_free--;
+ } else if (queue->evt_free) {
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ queue->evt_free--;
+ } else {
+ goto out;
}
- LEAVE;
- return 0;
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue_list);
+out:
+ spin_unlock_irqrestore(&queue->l_lock, flags);
+ return evt;
}
-/**
- * ibmvfc_free_event_pool - Frees memory of the event pool of a host
- * @vhost: ibmvfc host who owns the event pool
- *
- **/
-static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
-{
- int i;
- struct ibmvfc_event_pool *pool = &vhost->pool;
-
- ENTER;
- for (i = 0; i < pool->size; ++i) {
- list_del(&pool->events[i].queue);
- BUG_ON(atomic_read(&pool->events[i].free) != 1);
- if (pool->events[i].ext_list)
- dma_pool_free(vhost->sg_pool,
- pool->events[i].ext_list,
- pool->events[i].ext_list_token);
- }
-
- kfree(pool->events);
- dma_free_coherent(vhost->dev,
- pool->size * sizeof(*pool->iu_storage),
- pool->iu_storage, pool->iu_token);
- LEAVE;
-}
+#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
+#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
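/*
 * Annotation: unlike the old ibmvfc_get_event(vhost), which BUG()ed on an
 * empty free list, __ibmvfc_get_event() can now return NULL, and every
 * caller must cope; e.g. the pattern used by the queuecommand path later in
 * this diff:
 *
 *	evt = ibmvfc_get_event(&vhost->crq);
 *	if (!evt)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 */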
/**
- * ibmvfc_get_event - Gets the next free event in pool
- * @vhost: ibmvfc host struct
+ * ibmvfc_locked_done - Calls evt completion with host_lock held
+ * @evt: ibmvfc evt to complete
*
- * Returns a free event from the pool.
+ * All non-scsi command completion callbacks have the expectation that the
+ * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
+ * MAD evt with the host_lock.
**/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
- struct ibmvfc_event *evt;
+ unsigned long flags;
- BUG_ON(list_empty(&vhost->free));
- evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
- atomic_set(&evt->free, 0);
- list_del(&evt->queue);
- return evt;
+ spin_lock_irqsave(evt->vhost->host->host_lock, flags);
+ evt->_done(evt);
+ spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}
/**
@@ -1296,9 +1591,15 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
{
evt->cmnd = NULL;
evt->sync_iu = NULL;
- evt->crq.format = format;
- evt->done = done;
evt->eh_comp = NULL;
+ evt->crq.format = format;
+ if (format == IBMVFC_CMD_FORMAT)
+ evt->done = done;
+ else {
+ evt->_done = done;
+ evt->done = ibmvfc_locked_done;
+ }
+ evt->hwq = 0;
}
/**
@@ -1321,8 +1622,8 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
}
/**
- * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
- * @scmd: Scsi_Cmnd with the scatterlist
+ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
+ * @scmd: struct scsi_cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
@@ -1338,6 +1639,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
int sg_mapped;
struct srp_direct_buf *data = &vfc_cmd->ioba;
struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
if (cls3_error)
vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
@@ -1354,10 +1656,10 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
- vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+ iu->add_cdb_len |= IBMVFC_WRDATA;
} else {
vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
- vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+ iu->add_cdb_len |= IBMVFC_RDDATA;
}
if (sg_mapped == 1) {
@@ -1389,12 +1691,13 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
/**
* ibmvfc_timeout - Internal command timeout handler
- * @evt: struct ibmvfc_event that timed out
+ * @t: struct ibmvfc_event that timed out
*
* Called when an internally generated command times out
**/
-static void ibmvfc_timeout(struct ibmvfc_event *evt)
+static void ibmvfc_timeout(struct timer_list *t)
{
+ struct ibmvfc_event *evt = timer_container_of(evt, t, timer);
struct ibmvfc_host *vhost = evt->vhost;
dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
ibmvfc_reset_host(vhost);
@@ -1412,6 +1715,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
struct ibmvfc_host *vhost, unsigned long timeout)
{
__be64 *crq_as_u64 = (__be64 *) &evt->crq;
+ unsigned long flags;
int rc;
/* Copy the IU into the transfer area */
@@ -1423,22 +1727,34 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
else
BUG();
- list_add_tail(&evt->queue, &vhost->sent);
- init_timer(&evt->timer);
+ timer_setup(&evt->timer, ibmvfc_timeout, 0);
if (timeout) {
- evt->timer.data = (unsigned long) evt;
evt->timer.expires = jiffies + (timeout * HZ);
- evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
add_timer(&evt->timer);
}
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->sent);
+ atomic_set(&evt->active, 1);
+
mb();
- if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
- be64_to_cpu(crq_as_u64[1])))) {
- list_del(&evt->queue);
- del_timer(&evt->timer);
+ if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
+ rc = ibmvfc_send_sub_crq(vhost,
+ evt->queue->vios_cookie,
+ be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]),
+ 0, 0);
+ else
+ rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+
+ if (rc) {
+ atomic_set(&evt->active, 0);
+ list_del(&evt->queue_list);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
+ timer_delete(&evt->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
* Firmware will send a CRQ with a transport event (0xFF) to
@@ -1462,8 +1778,10 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
evt->done(evt);
- } else
+ } else {
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt);
+ }
return 0;
}
@@ -1477,7 +1795,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_host *vhost = evt->vhost;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
struct scsi_cmnd *cmnd = evt->cmnd;
const char *err = unknown_error;
int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
@@ -1495,9 +1813,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = rsp->data.info.rsp_code;
- scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+ scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
"flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
- cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+ cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
@@ -1511,15 +1829,18 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_target *tgt;
+ unsigned long flags;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
}
}
ibmvfc_reinit_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
@@ -1531,7 +1852,7 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
- struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
struct scsi_cmnd *cmnd = evt->cmnd;
u32 rsp_len = 0;
u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
@@ -1545,7 +1866,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
scsi_set_resid(cmnd, 0);
if (vfc_cmd->status) {
- cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+ cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
@@ -1568,12 +1889,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->result = (DID_ERROR << 16);
scsi_dma_unmap(cmnd);
- cmnd->scsi_done(cmnd);
+ scsi_done(cmnd);
}
- if (evt->eh_comp)
- complete(evt->eh_comp);
-
ibmvfc_free_event(evt);
}
@@ -1602,58 +1920,95 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
- };
+ }
return result;
}
+static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+ struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+ struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
+ size_t offset;
+
+ memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ offset = offsetof(struct ibmvfc_cmd, v2.rsp);
+ vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
+ } else
+ offset = offsetof(struct ibmvfc_cmd, v1.rsp);
+ vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
+ vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
+ vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
+ vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
+ vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &iu->lun);
+
+ return vfc_cmd;
+}
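/*
 * Annotation: the offsetof() selection above points resp.va at whichever
 * of v1.rsp / v2.rsp is live for this login, so the VIOS DMAs the FCP
 * response into the correct member regardless of payload version.
 */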
+
/**
* ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @shost: scsi host struct
* @cmnd: struct scsi_cmnd to be executed
- * @done: Callback function to be called when cmnd is completed
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
- void (*done) (struct scsi_cmnd *))
+static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
- struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+ struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct ibmvfc_cmd *vfc_cmd;
+ struct ibmvfc_fcp_cmd_iu *iu;
struct ibmvfc_event *evt;
+ u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
+ u16 scsi_channel;
int rc;
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
cmnd->result = rc;
- done(cmnd);
+ scsi_done(cmnd);
return 0;
}
cmnd->result = (DID_OK << 16);
- evt = ibmvfc_get_event(vhost);
+ if (vhost->using_channels) {
+ scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
+ if (!evt)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+		evt->hwq = scsi_channel;
+ } else {
+ evt = ibmvfc_get_event(&vhost->crq);
+ if (!evt)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
evt->cmnd = cmnd;
- cmnd->scsi_done = done;
- vfc_cmd = &evt->iu.cmd;
- memset(vfc_cmd, 0, sizeof(*vfc_cmd));
- vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
- vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
- vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
- vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
- vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
- vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
- int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
- memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+ vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
+ iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
+
+ iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
+ memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
if (cmnd->flags & SCMD_TAGGED) {
- vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
- vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+ vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
+ iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
}
+ vfc_cmd->correlation = cpu_to_be64((u64)evt);
+
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
@@ -1666,12 +2021,10 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
"Failed to map DMA buffer for command. rc=%d\n", rc);
cmnd->result = DID_ERROR << 16;
- done(cmnd);
+ scsi_done(cmnd);
return 0;
}
-static DEF_SCSI_QCMD(ibmvfc_queuecommand)
-
/**
* ibmvfc_sync_completion - Signal that a synchronous command has completed
* @evt: ibmvfc event struct
@@ -1725,7 +2078,12 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
}
vhost->aborting_passthru = 1;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return -ENOMEM;
+ }
+
ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
@@ -1783,7 +2141,11 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
if (unlikely((rc = ibmvfc_host_chkready(vhost))))
goto unlock_out;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ rc = -ENOMEM;
+ goto unlock_out;
+ }
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
@@ -1844,6 +2206,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
(bsg_request->rqst_data.h_els.port_id[1] << 8) |
bsg_request->rqst_data.h_els.port_id[2];
+ fallthrough;
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
@@ -1852,12 +2215,13 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
bsg_request->rqst_data.h_ct.port_id[2];
+ fallthrough;
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
default:
return -ENOTSUPP;
- };
+ }
if (port_id == -1)
return -EINVAL;
@@ -1899,7 +2263,12 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
goto out;
}
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ rc = -ENOMEM;
+ goto out;
+ }
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
mad = &evt->iu.passthru;
@@ -1975,28 +2344,32 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt = NULL;
union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ struct ibmvfc_fcp_cmd_iu *iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
int rsp_rc = -EBUSY;
unsigned long flags;
int rsp_code = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
+ if (vhost->using_channels)
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
+ else
+ evt = ibmvfc_get_event(&vhost->crq);
+
+ if (!evt) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return -ENOMEM;
+ }
+
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
- tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
- tmf->iu.tmf_flags = type;
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ iu->tmf_flags = type;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
@@ -2014,7 +2387,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
wait_for_completion(&evt->comp);
if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
@@ -2023,7 +2396,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2038,7 +2411,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
/**
* ibmvfc_match_rport - Match function for specified remote port
* @evt: ibmvfc event struct
- * @device: device to match (rport)
+ * @rport: device to match
*
* Returns:
* 1 if event matches rport / 0 if event does not match rport
@@ -2086,6 +2459,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
}
/**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt: ibmvfc event struct
+ *
+ * Returns:
+ * true / false
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_event *loop_evt;
+
+ list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+ if (loop_evt == evt)
+ return true;
+
+ return false;
+}
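/*
 * Annotation: ibmvfc_event_is_free() is a linear scan of the queue's free
 * list. It is only used on the slow error-recovery path
 * (ibmvfc_wait_for_ops() below), where the caller holds the queue's l_lock;
 * the I/O fast path never calls it.
 */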
+
+/**
* ibmvfc_wait_for_ops - Wait for ops to complete
* @vhost: ibmvfc host struct
* @device: device to match (starget or sdev)
@@ -2099,19 +2490,35 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
{
struct ibmvfc_event *evt;
DECLARE_COMPLETION_ONSTACK(comp);
- int wait;
+ int wait, i, q_index, q_size;
unsigned long flags;
signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+ struct ibmvfc_queue *queues;
ENTER;
+ if (vhost->mq_enabled && vhost->using_channels) {
+ queues = vhost->scsi_scrqs.scrqs;
+ q_size = vhost->scsi_scrqs.active_queues;
+ } else {
+ queues = &vhost->crq;
+ q_size = 1;
+ }
+
do {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = &comp;
- wait++;
+ for (q_index = 0; q_index < q_size; q_index++) {
+ spin_lock(&queues[q_index].l_lock);
+ for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+ evt = &queues[q_index].evt_pool.events[i];
+ if (!ibmvfc_event_is_free(evt)) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
}
+ spin_unlock(&queues[q_index].l_lock);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -2121,11 +2528,18 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
if (!timeout) {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
- if (match(evt, device)) {
- evt->eh_comp = NULL;
- wait++;
+ for (q_index = 0; q_index < q_size; q_index++) {
+ spin_lock(&queues[q_index].l_lock);
+ for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+ evt = &queues[q_index].evt_pool.events[i];
+ if (!ibmvfc_event_is_free(evt)) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
}
+ spin_unlock(&queues[q_index].l_lock);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait)
@@ -2140,23 +2554,130 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
return SUCCESS;
}
-/**
- * ibmvfc_cancel_all - Cancel all outstanding commands to the device
- * @sdev: scsi device to cancel commands
- * @type: type of error recovery being performed
- *
- * This sends a cancel to the VIOS for the specified device. This does
- * NOT send any abort to the actual device. That must be done separately.
- *
- * Returns:
- * 0 on success / other on failure
- **/
-static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ struct scsi_device *sdev,
+ int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
+ struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
+
+ evt = ibmvfc_get_reserved_event(queue);
+ if (!evt)
+ return NULL;
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &tmf->lun);
+ if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
+ type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+ if (vhost->state == IBMVFC_ACTIVE)
+ tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
+ else
+ tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
+
+ init_completion(&evt->comp);
+
+ return evt;
+}
+
+static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct ibmvfc_event *evt, *found_evt, *temp;
+ struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
+ unsigned long flags;
+ int num_hwq, i;
+ int fail = 0;
+ LIST_HEAD(cancelq);
+ u16 status;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ num_hwq = vhost->scsi_scrqs.active_queues;
+ for (i = 0; i < num_hwq; i++) {
+ spin_lock(queues[i].q_lock);
+ spin_lock(&queues[i].l_lock);
+ found_evt = NULL;
+ list_for_each_entry(evt, &queues[i].sent, queue_list) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+ spin_unlock(&queues[i].l_lock);
+
+ if (found_evt && vhost->logged_in) {
+ evt = ibmvfc_init_tmf(&queues[i], sdev, type);
+ if (!evt) {
+ spin_unlock(queues[i].q_lock);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return -ENOMEM;
+ }
+ evt->sync_iu = &queues[i].cancel_rsp;
+ ibmvfc_send_event(evt, vhost, default_timeout);
+ list_add_tail(&evt->cancel, &cancelq);
+ }
+
+ spin_unlock(queues[i].q_lock);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (list_empty(&cancelq)) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+ return 0;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+ list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
+ wait_for_completion(&evt->comp);
+ status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
+ list_del(&evt->cancel);
+ ibmvfc_free_event(evt);
+
+ if (status != IBMVFC_MAD_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+			/*
+			 * Host adapter is most likely going through reset;
+			 * return success so the caller will wait for the
+			 * command being cancelled to get returned.
+			 */
+ break;
+ default:
+ fail = 1;
+ break;
+ }
+ }
+ }
+
+ if (fail)
+ return -EIO;
+
+ sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+ LEAVE;
+ return 0;
+}
+
+static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
int rsp_rc = -EBUSY;
@@ -2164,14 +2685,16 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
u16 status;
ENTER;
- spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(&vhost->crq.l_lock);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
+ spin_unlock(&vhost->crq.l_lock);
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2181,27 +2704,8 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
if (vhost->logged_in) {
- evt = ibmvfc_get_event(vhost);
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
-
- tmf = &evt->iu.tmf;
- memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = cpu_to_be32(1);
- tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
- tmf->common.length = cpu_to_be16(sizeof(*tmf));
- tmf->scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->lun);
- if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
- type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
- if (vhost->state == IBMVFC_ACTIVE)
- tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
- else
- tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
-
+		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
+		if (!evt) {
+			spin_unlock_irqrestore(vhost->host->host_lock, flags);
+			return -ENOMEM;
+		}
evt->sync_iu = &rsp;
- init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
@@ -2241,6 +2745,27 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev: scsi device to cancel commands
+ * @type: type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+
+ if (vhost->mq_enabled && vhost->using_channels)
+ return ibmvfc_cancel_all_mq(sdev, type);
+ else
+ return ibmvfc_cancel_all_sq(sdev, type);
+}
+
+/**
* ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
* @key: cancel key to match
@@ -2290,19 +2815,22 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp_iu;
- struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ struct ibmvfc_fcp_cmd_iu *iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
int rc, rsp_rc = -EBUSY;
unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
int rsp_code = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(&vhost->crq.l_lock);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
+ spin_unlock(&vhost->crq.l_lock);
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2312,23 +2840,23 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
}
if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
+ if (!evt) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return -ENOMEM;
+ }
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+ tmf = ibmvfc_init_vfc_cmd(evt, sdev);
+ iu = ibmvfc_get_fcp_iu(vhost, tmf);
- tmf = &evt->iu.cmd;
- memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
- tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
- tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
- tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
- tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
+ tmf->correlation = cpu_to_be64((u64)evt);
+
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
@@ -2373,7 +2901,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
}
if (rsp_iu.cmd.status)
- rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
+ rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
@@ -2382,7 +2910,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2474,18 +3002,6 @@ static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
}
/**
- * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
- * @sdev: scsi device struct
- * @data: return code
- *
- **/
-static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
-{
- unsigned long *rc = data;
- *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
-}
-
-/**
* ibmvfc_eh_target_reset_handler - Reset the target
* @cmd: scsi command struct
*
@@ -2494,22 +3010,38 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
- struct ibmvfc_host *vhost = shost_priv(sdev->host);
- struct scsi_target *starget = scsi_target(sdev);
+ struct scsi_target *starget = scsi_target(cmd->device);
+ struct fc_rport *rport = starget_to_rport(starget);
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct ibmvfc_host *vhost = shost_priv(shost);
int block_rc;
int reset_rc = 0;
int rc = FAILED;
unsigned long cancel_rc = 0;
+ bool tgt_reset = false;
ENTER;
- block_rc = fc_block_scsi_eh(cmd);
+ block_rc = fc_block_rport(rport);
ibmvfc_wait_while_resetting(vhost);
if (block_rc != FAST_IO_FAIL) {
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
- reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ if ((sdev->channel != starget->channel) ||
+ (sdev->id != starget->id))
+ continue;
+
+ cancel_rc |= ibmvfc_cancel_all(sdev,
+ IBMVFC_TMF_TGT_RESET);
+ if (!tgt_reset) {
+ reset_rc = ibmvfc_reset_device(sdev,
+ IBMVFC_TARGET_RESET, "target");
+ tgt_reset = true;
+ }
+ }
} else
- starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
+ starget_for_each_device(starget, &cancel_rc,
+ ibmvfc_dev_cancel_all_noreset);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
@@ -2528,16 +3060,12 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
**/
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int rc, block_rc;
+ int rc;
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
- block_rc = fc_block_scsi_eh(cmd);
dev_err(vhost->dev, "Resetting connection due to error recovery\n");
rc = ibmvfc_issue_fc_host_lip(vhost->host);
- if (block_rc == FAST_IO_FAIL)
- return FAST_IO_FAIL;
-
return rc ? FAILED : SUCCESS;
}
@@ -2554,7 +3082,9 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *dev_rport;
struct scsi_device *sdev;
- unsigned long rc;
+ struct ibmvfc_target *tgt;
+ unsigned long rc, flags;
+ unsigned int found;
ENTER;
shost_for_each_device(sdev, shost) {
@@ -2568,6 +3098,27 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ found = 0;
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == rport->port_id) {
+ found++;
+ break;
+ }
+ }
+
+ if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ /*
+ * If we get here, that means we previously attempted to send
+ * an implicit logout to the target but it failed, most likely
+ * due to I/O being pending, so we need to send it again
+ */
+ ibmvfc_del_tgt(tgt);
+ ibmvfc_reinit_host(vhost);
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
LEAVE;
}
@@ -2666,7 +3217,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
- };
+ }
break;
case IBMVFC_AE_LINK_UP:
@@ -2702,7 +3253,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
tgt->logo_rcvd = 1;
if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
ibmvfc_reinit_host(vhost);
}
}
@@ -2720,16 +3271,18 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
- };
+ }
}
/**
* ibmvfc_handle_crq - Handles and frees received events in the CRQ
* @crq: Command/Response queue
* @vhost: ibmvfc host struct
+ * @evt_doneq: Event done queue
*
**/
-static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+ struct list_head *evt_doneq)
{
long rc;
struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
@@ -2760,16 +3313,21 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
- dev_info(vhost->dev, "Re-enabling adapter\n");
+ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
+
+ scsi_block_requests(vhost->host);
ibmvfc_purge_requests(vhost, DID_REQUEUE);
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
- } else {
- dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+ wake_up(&vhost->work_wait_q);
+ } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+ dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+ } else {
+ dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
}
return;
case IBMVFC_CRQ_CMD_RSP:
@@ -2786,22 +3344,21 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
* things we send. Make sure this response is to something we
* actually sent
*/
- if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+ if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
crq->ioba);
return;
}
- if (unlikely(atomic_read(&evt->free))) {
+ if (unlikely(atomic_dec_if_positive(&evt->active))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba);
return;
}
- del_timer(&evt->timer);
- list_del(&evt->queue);
- ibmvfc_trc_end(evt);
- evt->done(evt);
+ spin_lock(&evt->queue->l_lock);
+ list_move_tail(&evt->queue_list, evt_doneq);
+ spin_unlock(&evt->queue->l_lock);
}
/**
@@ -2819,20 +3376,24 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
int done = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (time >= (init_timeout * HZ)) {
+ if (!vhost->scan_timeout)
+ done = 1;
+ else if (time >= (vhost->scan_timeout * HZ)) {
dev_info(vhost->dev, "Scan taking longer than %d seconds, "
- "continuing initialization\n", init_timeout);
+ "continuing initialization\n", vhost->scan_timeout);
done = 1;
}
- if (vhost->scan_complete)
+ if (vhost->scan_complete) {
+ vhost->scan_timeout = init_timeout;
done = 1;
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return done;
}
/**
- * ibmvfc_slave_alloc - Setup the device's task set value
+ * ibmvfc_sdev_init - Setup the device's task set value
* @sdev: struct scsi_device device to configure
*
* Set the device's task set value so that error handling works as
@@ -2841,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
-static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+static int ibmvfc_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2880,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
}
/**
- * ibmvfc_slave_configure - Configure the device
+ * ibmvfc_sdev_configure - Configure the device
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also.
@@ -2889,14 +3451,17 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
* Returns:
* 0
**/
-static int ibmvfc_slave_configure(struct scsi_device *sdev)
+static int ibmvfc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
@@ -2905,7 +3470,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
* ibmvfc_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
- * @reason: calling context
*
* Return value:
* actual depth set
@@ -2924,8 +3488,7 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.partition_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.partition_name);
}
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
@@ -2934,8 +3497,7 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.device_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.device_name);
}
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
@@ -2944,8 +3506,7 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.port_loc_code);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.port_loc_code);
}
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
@@ -2954,8 +3515,7 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- vhost->login_buf->resp.drc_name);
+ return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.drc_name);
}
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
@@ -2963,7 +3523,8 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+ return sysfs_emit(buf, "%d\n",
+ be32_to_cpu(vhost->login_buf->resp.version));
}
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
@@ -2971,12 +3532,14 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
- return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+ return sysfs_emit(buf, "%llx\n",
+ be64_to_cpu(vhost->login_buf->resp.capabilities));
}
/**
* ibmvfc_show_log_level - Show the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
*
* Return value:
@@ -2991,7 +3554,7 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
int len;
spin_lock_irqsave(shost->host_lock, flags);
- len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+ len = sysfs_emit(buf, "%d\n", vhost->log_level);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
@@ -2999,7 +3562,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
/**
* ibmvfc_store_log_level - Change the adapter's error logging level
* @dev: class device struct
+ * @attr: unused
* @buf: buffer
+ * @count: buffer size
*
* Return value:
* number of bytes printed to buffer
@@ -3018,6 +3583,39 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
return strlen(buf);
}
+static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
+ unsigned long flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ len = sysfs_emit(buf, "%d\n", scsi->desired_queues);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return len;
+}
+
+static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
+ unsigned long flags = 0;
+ unsigned int channels;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ channels = simple_strtoul(buf, NULL, 10);
+ scsi->desired_queues = min(channels, shost->nr_hw_queues);
+ ibmvfc_hard_reset_host(vhost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return strlen(buf);
+}
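
Usage note: the store path renegotiates by hard-resetting the host, so
changing the channel count is disruptive to outstanding I/O. Assuming a
host numbered host0 (hypothetical path), writing
echo 4 > /sys/class/scsi_host/host0/nr_scsi_channels
requests four channels, clamped to shost->nr_hw_queues.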
+
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
@@ -3026,6 +3624,8 @@ static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
ibmvfc_show_log_level, ibmvfc_store_log_level);
+static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
+ ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
@@ -3041,10 +3641,10 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
@@ -3064,7 +3664,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ibmvfc_trace_attr = {
+static const struct bin_attribute ibmvfc_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
@@ -3074,18 +3674,21 @@ static struct bin_attribute ibmvfc_trace_attr = {
};
#endif
-static struct device_attribute *ibmvfc_attrs[] = {
- &dev_attr_partition_name,
- &dev_attr_device_name,
- &dev_attr_port_loc_code,
- &dev_attr_drc_name,
- &dev_attr_npiv_version,
- &dev_attr_capabilities,
- &dev_attr_log_level,
+static struct attribute *ibmvfc_host_attrs[] = {
+ &dev_attr_partition_name.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_port_loc_code.attr,
+ &dev_attr_drc_name.attr,
+ &dev_attr_npiv_version.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_log_level.attr,
+ &dev_attr_nr_scsi_channels.attr,
NULL
};
-static struct scsi_host_template driver_template = {
+ATTRIBUTE_GROUPS(ibmvfc_host);
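+/*
+ * ATTRIBUTE_GROUPS() expands to ibmvfc_host_group and the NULL-terminated
+ * ibmvfc_host_groups table wired into .shost_groups below.
+ */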
+
+static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual FC Adapter",
.proc_name = IBMVFC_NAME,
@@ -3095,8 +3698,8 @@ static struct scsi_host_template driver_template = {
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
- .slave_alloc = ibmvfc_slave_alloc,
- .slave_configure = ibmvfc_slave_configure,
+ .sdev_init = ibmvfc_sdev_init,
+ .sdev_configure = ibmvfc_sdev_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
@@ -3105,8 +3708,7 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = IBMVFC_MAX_SECTORS,
- .use_clustering = ENABLE_CLUSTERING,
- .shost_attrs = ibmvfc_attrs,
+ .shost_groups = ibmvfc_host_groups,
.track_queue_depth = 1,
};
@@ -3119,10 +3721,10 @@ static struct scsi_host_template driver_template = {
**/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+ struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct ibmvfc_async_crq *crq;
- crq = &async_crq->msgs[async_crq->cur];
+ crq = &async_crq->msgs.async[async_crq->cur];
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
@@ -3142,10 +3744,10 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
**/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
- struct ibmvfc_crq_queue *queue = &vhost->crq;
+ struct ibmvfc_queue *queue = &vhost->crq;
struct ibmvfc_crq *crq;
- crq = &queue->msgs[queue->cur];
+ crq = &queue->msgs.crq[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
@@ -3189,10 +3791,13 @@ static void ibmvfc_tasklet(void *data)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
+ struct ibmvfc_event *evt, *temp;
unsigned long flags;
int done = 0;
+ LIST_HEAD(evt_doneq);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
while (!done) {
/* Pull all the valid messages off the async CRQ */
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
@@ -3203,7 +3808,7 @@ static void ibmvfc_tasklet(void *data)
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- ibmvfc_handle_crq(crq, vhost);
+ ibmvfc_handle_crq(crq, vhost, &evt_doneq);
crq->valid = 0;
wmb();
}
@@ -3216,14 +3821,138 @@ static void ibmvfc_tasklet(void *data)
wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
- ibmvfc_handle_crq(crq, vhost);
+ ibmvfc_handle_crq(crq, vhost, &evt_doneq);
crq->valid = 0;
wmb();
} else
done = 1;
}
+ spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+ timer_delete(&evt->timer);
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
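
The completion loop above is a deliberate two-phase pattern: finished
events are harvested onto a private list under the queue lock, and the
potentially slow done() callbacks run only after all locks are dropped. A
standalone sketch of the pattern (struct item is an illustrative stand-in
for ibmvfc_event, not a type in this driver):

	struct item {
		struct list_head node;
		void (*done)(struct item *);
	};

	static void drain_finished(spinlock_t *lock, struct list_head *finished)
	{
		struct item *it, *tmp;
		LIST_HEAD(done);

		spin_lock(lock);		/* phase 1: harvest under the lock */
		list_splice_init(finished, &done);
		spin_unlock(lock);

		list_for_each_entry_safe(it, tmp, &done, node) {
			list_del(&it->node);	/* phase 2: callbacks, lock-free */
			it->done(it);
		}
	}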
+
+static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
+{
+ struct device *dev = scrq->vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ unsigned long rc;
+ int irq_action = H_ENABLE_VIO_INTERRUPT;
+
+ if (!enable)
+ irq_action = H_DISABLE_VIO_INTERRUPT;
+
+ rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
+ scrq->hw_irq, 0, 0);
+
+ if (rc)
+ dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
+ enable ? "enable" : "disable", scrq->hwq_id, rc);
+
+ return rc;
+}
+
+static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+ struct list_head *evt_doneq)
+{
+ struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
+
+ switch (crq->valid) {
+ case IBMVFC_CRQ_CMD_RSP:
+ break;
+ case IBMVFC_CRQ_XPORT_EVENT:
+ return;
+ default:
+ dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
+ return;
+ }
+
+ /* The only kind of payload CRQs we should get are responses to
+ * things we send. Make sure this response is to something we
+ * actually sent
+ */
+ if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
+ dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
+ crq->ioba);
+ return;
+ }
+
+ if (unlikely(atomic_dec_if_positive(&evt->active))) {
+ dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
+ crq->ioba);
+ return;
+ }
+
+ spin_lock(&evt->queue->l_lock);
+ list_move_tail(&evt->queue_list, evt_doneq);
+ spin_unlock(&evt->queue->l_lock);
+}
+
+static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
+{
+ struct ibmvfc_crq *crq;
+
+ crq = &scrq->msgs.scrq[scrq->cur].crq;
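+ /*
+ * Bit 7 of the valid byte marks an entry the VIOS has handed to
+ * the host; consuming one advances the cursor, wrapping at the
+ * ring size, and the rmb() orders the valid check before any
+ * reads of the entry payload.
+ */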
+ if (crq->valid & 0x80) {
+ if (++scrq->cur == scrq->size)
+ scrq->cur = 0;
+ rmb();
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
+{
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_event *evt, *temp;
+ unsigned long flags;
+ int done = 0;
+ LIST_HEAD(evt_doneq);
+
+ spin_lock_irqsave(scrq->q_lock, flags);
+ while (!done) {
+ while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+ ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+ crq->valid = 0;
+ wmb();
+ }
+
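+ /*
+ * Re-arm the interrupt, then poll once more: an entry that
+ * arrived between the final poll and the re-enable would
+ * otherwise sit in the ring until the next interrupt.
+ */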
+ ibmvfc_toggle_scrq_irq(scrq, 1);
+ if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+ ibmvfc_toggle_scrq_irq(scrq, 0);
+ ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+ crq->valid = 0;
+ wmb();
+ } else
+ done = 1;
+ }
+ spin_unlock_irqrestore(scrq->q_lock, flags);
+
+ list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+ timer_delete(&evt->timer);
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
+static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance)
+{
+ struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
+
+ ibmvfc_toggle_scrq_irq(scrq, 0);
+ ibmvfc_drain_sub_crq(scrq);
+
+ return IRQ_HANDLED;
}
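
The registration side of this handler is outside this hunk; it would
plausibly be wired up along these lines (the irq number field and the name
string are assumptions, not confirmed by this patch):

	/* Hypothetical sub-CRQ irq registration; scrq->irq is assumed. */
	static int example_register_scrq_irq(struct ibmvfc_queue *scrq)
	{
		return request_irq(scrq->irq, ibmvfc_interrupt_mq, 0,
				   "ibmvfc-scrq", scrq);
	}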
/**
@@ -3235,8 +3964,8 @@ static void ibmvfc_tasklet(void *data)
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
- tgt->job_step = job_step;
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
+ tgt->job_step = job_step;
wake_up(&tgt->vhost->work_wait_q);
}
@@ -3252,7 +3981,7 @@ static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
wake_up(&tgt->vhost->work_wait_q);
return 0;
} else
@@ -3327,13 +4056,13 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt->add_rport = 1;
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else if (prli_rsp[index].retry)
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
} else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3350,13 +4079,13 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3378,13 +4107,24 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
prli = &evt->iu.prli;
memset(prli, 0, sizeof(*prli));
- prli->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ prli->common.version = cpu_to_be32(2);
+ prli->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ prli->common.version = cpu_to_be32(1);
+ }
prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
prli->common.length = cpu_to_be16(sizeof(*prli));
prli->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3449,14 +4189,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
- ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
- ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3479,14 +4220,25 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
tgt->logo_rcvd = 0;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
- plogi->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ plogi->common.version = cpu_to_be32(2);
+ plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ plogi->common.version = cpu_to_be32(1);
+ }
plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
plogi->common.length = cpu_to_be16(sizeof(*plogi));
plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3527,35 +4279,33 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
- };
+ }
- if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
- else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
- tgt->scsi_id != tgt->new_scsi_id)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}
/**
- * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
* @tgt: ibmvfc target struct
+ * @done: Routine to call when the event is responded to
*
+ * Returns:
+ * Allocated and initialized ibmvfc_event struct, or NULL on failure
**/
-static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
+ void (*done) (struct ibmvfc_event *))
{
struct ibmvfc_implicit_logout *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
- if (vhost->discovery_threads >= disc_threads)
- return;
-
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
- vhost->discovery_threads++;
- ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt)
+ return NULL;
+ ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
memset(mad, 0, sizeof(*mad));
@@ -3563,6 +4313,32 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
mad->common.length = cpu_to_be16(sizeof(*mad));
mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ return evt;
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_done);
+ if (!evt) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3574,6 +4350,164 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
+ u32 status = be16_to_cpu(mad->common.status);
+
+ vhost->discovery_threads--;
+ ibmvfc_free_event(evt);
+
+ /*
+ * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
+ * driver in which case we need to free up all the targets. If we are
+ * not unloading, we will still go through a hard reset to get out of
+ * offline state, so there is no need to track the old targets in that
+ * case.
+ */
+ if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
+
+ tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (!vhost->logged_in) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ return;
+ }
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ vhost->discovery_threads++;
+ evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
+ ibmvfc_tgt_implicit_logout_and_del_done);
+ if (!evt) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
+ * ibmvfc_tgt_move_login_done - Completion handler for Move Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
+ tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+ tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->scsi_id = tgt->new_scsi_id;
+ tgt->ids.port_id = tgt->scsi_id;
+ memcpy(&tgt->service_parms, &rsp->service_parms,
+ sizeof(tgt->service_parms));
+ memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+ sizeof(tgt->service_parms_change));
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+
+ tgt_log(tgt, level,
+ "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+ tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+ status);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_move_login - Initiate a move login for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_move_login *move;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ move = &evt->iu.move_login;
+ memset(move, 0, sizeof(*move));
+ move->common.version = cpu_to_be32(1);
+ move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
+ move->common.length = cpu_to_be16(sizeof(*move));
+
+ move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
+ move->wwpn = cpu_to_be64(tgt->wwpn);
+ move->node_name = cpu_to_be64(tgt->ids.node_name);
+
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
+}
+
+/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
@@ -3584,11 +4518,9 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
struct ibmvfc_target *tgt)
{
- if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
- sizeof(tgt->ids.port_name)))
+ if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
return 1;
- if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
- sizeof(tgt->ids.node_name)))
+ if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
return 1;
if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
return 1;
@@ -3610,28 +4542,28 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
- del_timer(&tgt->timer);
+ timer_delete(&tgt->timer);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
- mad->iu.status, mad->iu.error,
+ be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3690,14 +4622,15 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
/**
* ibmvfc_adisc_timeout - Handle an ADISC timeout
- * @tgt: ibmvfc target struct
+ * @t: Timer context used to fetch the target
*
* If an ADISC times out, send a cancel. If the cancel times
* out, reset the CRQ. When the ADISC comes back as cancelled,
* log back into the target.
**/
-static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
+static void ibmvfc_adisc_timeout(struct timer_list *t)
{
+ struct ibmvfc_target *tgt = timer_container_of(tgt, t, timer);
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
@@ -3716,13 +4649,26 @@ static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
vhost->abort_threads++;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
+ vhost->abort_threads--;
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = cpu_to_be32(1);
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
tmf->common.length = cpu_to_be16(sizeof(*tmf));
tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
@@ -3761,7 +4707,13 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
@@ -3782,16 +4734,14 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
if (timer_pending(&tgt->timer))
mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
else {
- tgt->timer.data = (unsigned long) tgt;
tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
- tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
add_timer(&tgt->timer);
}
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
vhost->discovery_threads--;
- del_timer(&tgt->timer);
+ timer_delete(&tgt->timer);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
@@ -3816,9 +4766,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Query Target succeeded\n");
- tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ ibmvfc_del_tgt(tgt);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
@@ -3832,19 +4781,20 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_del_tgt(tgt);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
- rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
- rsp->fc_explain, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+ status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3866,7 +4816,13 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ __ibmvfc_reset_host(vhost);
+ return;
+ }
vhost->discovery_threads++;
evt->tgt = tgt;
ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
@@ -3889,37 +4845,87 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
/**
* ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
* @vhost: ibmvfc host struct
- * @scsi_id: SCSI ID to allocate target for
+ * @target: Holds the SCSI ID of the target to allocate and its WWPN
*
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
+ struct ibmvfc_discover_targets_entry *target)
{
+ struct ibmvfc_target *stgt = NULL;
+ struct ibmvfc_target *wtgt = NULL;
struct ibmvfc_target *tgt;
unsigned long flags;
+ u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
+ u64 wwpn = be64_to_cpu(target->wwpn);
+ /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->wwpn == wwpn) {
+ wtgt = tgt;
+ break;
+ }
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->scsi_id == scsi_id) {
- if (tgt->need_login)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ stgt = tgt;
+ break;
+ }
+ }
+
+ if (wtgt && !stgt) {
+ /*
+ * A WWPN target has moved and we are still tracking the old
+ * SCSI ID. The only way we should be able to get here is if
+ * we attempted to send an implicit logout for the old SCSI ID
+ * and it failed for some reason, such as I/O being pending to
+ * the target. In that case we will already have deleted the
+ * rport from the FC transport, so we do a move login, which
+ * works even with I/O pending. However, any I/O that is still
+ * pending will stay outstanding, so we only do this if fast
+ * fail is disabled for the rport; otherwise we let
+ * terminate_rport_io clean up the port before we log in at
+ * the new location.
+ */
+ if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ if (wtgt->move_login) {
+ /*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+ wtgt->new_scsi_id = scsi_id;
+ wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ wtgt->init_retries = 0;
+ ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+ }
goto unlock_out;
+ } else {
+ tgt_err(wtgt, "Unexpected target state: %d, %p\n",
+ wtgt->action, wtgt->rport);
}
+ } else if (stgt) {
+ if (tgt->need_login)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ goto unlock_out;
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
- tgt->new_scsi_id = scsi_id;
+ tgt->wwpn = wwpn;
tgt->vhost = vhost;
tgt->need_login = 1;
- tgt->cancel_key = vhost->task_set++;
- init_timer(&tgt->timer);
+ timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ tgt->cancel_key = vhost->task_set++;
list_add_tail(&tgt->queue, &vhost->targets);
unlock_out:
@@ -3939,9 +4945,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
int i, rc;
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
- rc = ibmvfc_alloc_target(vhost,
- be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
- IBMVFC_DISC_TGT_SCSI_ID_MASK);
+ rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]);
return rc;
}
@@ -3968,7 +4972,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -3990,7 +4994,14 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
{
struct ibmvfc_discover_targets *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ if (!evt) {
+ ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
+ ibmvfc_hard_reset_host(vhost);
+ return;
+ }
ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.discover_targets;
@@ -3998,9 +5009,10 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
mad->common.version = cpu_to_be32(1);
mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
mad->common.length = cpu_to_be16(sizeof(*mad));
- mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
- mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
- mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+ mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
+ mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma);
+ mad->buffer.len = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
+ mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -4009,6 +5021,162 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
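
ibmvfc_discover_targets follows the reserved-event MAD pattern this patch
uses throughout; distilled into one sketch (field names as in the
surrounding code, error handling trimmed):

	static void example_send_mad(struct ibmvfc_host *vhost)
	{
		struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);

		if (!evt) {	/* reserved pool exhausted */
			ibmvfc_hard_reset_host(vhost);
			return;
		}
		ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
		/* ... fill evt->iu.<mad> and set the host action ... */
		if (ibmvfc_send_event(evt, vhost, default_timeout))
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
	}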
+static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
+ struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int flags, active_queues, i;
+
+ ibmvfc_free_event(evt);
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
+ flags = be32_to_cpu(setup->flags);
+ vhost->do_enquiry = 0;
+ active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
+ scrqs->active_queues = active_queues;
+
+ if (flags & IBMVFC_CHANNELS_CANCELED) {
+ ibmvfc_dbg(vhost, "Channels Canceled\n");
+ vhost->using_channels = 0;
+ } else {
+ if (active_queues)
+ vhost->using_channels = 1;
+ for (i = 0; i < active_queues; i++)
+ scrqs->scrqs[i].vios_cookie =
+ be64_to_cpu(setup->channel_handles[i]);
+
+ ibmvfc_dbg(vhost, "Using %u channels\n",
+ vhost->scsi_scrqs.active_queues);
+ }
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Channel Setup failed\n");
+ fallthrough;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
+ mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ return;
+ }
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+}
+
+static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_channel_setup_mad *mad;
+ struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+ struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
+ unsigned int num_channels =
+ min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int i;
+
+ if (!evt) {
+ ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
+ ibmvfc_hard_reset_host(vhost);
+ return;
+ }
+
+ memset(setup_buf, 0, sizeof(*setup_buf));
+ if (num_channels == 0) {
+ setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+ } else {
+ setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
+ for (i = 0; i < num_channels; i++)
+ setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
+ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_setup;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
+ mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent channel setup\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+}
+
+static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
+ u32 mad_status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
+ vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
+ ibmvfc_free_event(evt);
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
+ fallthrough;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ ibmvfc_free_event(evt);
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
+ mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_free_event(evt);
+ return;
+ }
+
+ ibmvfc_channel_setup(vhost);
+}
+
+static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_channel_enquiry *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ if (!evt) {
+ ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
+ ibmvfc_hard_reset_host(vhost);
+ return;
+ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_enquiry;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+
+ if (mig_channels_only)
+ mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
+ if (mig_no_less_channels)
+ mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Send channel enquiry\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
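
Read together with ibmvfc_npiv_login_done below, the channelization
handshake this patch adds runs: NPIV login, then channel enquiry (the VIOS
reports how many SCSI sub-queue channels it supports), then channel setup
(the client hands over its sub-CRQ cookies, or cancels channels when none
are desired), then the usual query/target discovery. If the VIOS cancels
channels, the driver falls back to the single general-purpose CRQ
(using_channels stays 0).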
+
/**
* ibmvfc_npiv_login_done - Completion handler for NPIV Login
* @evt: ibmvfc event struct
@@ -4033,11 +5201,12 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_host_init(vhost);
+ fallthrough;
case IBMVFC_MAD_DRIVER_FAILED:
ibmvfc_free_event(evt);
return;
@@ -4067,7 +5236,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
}
vhost->logged_in = 1;
- npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
+ npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
rsp->drc_name, npiv_max_sectors);
@@ -4089,8 +5258,14 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
vhost->host->max_sectors = npiv_max_sectors;
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
- wake_up(&vhost->work_wait_q);
+
+ if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
+ ibmvfc_channel_enquiry(vhost);
+ } else {
+ vhost->do_enquiry = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+ }
}
/**
@@ -4101,7 +5276,13 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login_mad *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+ struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+
+ if (!evt) {
+ ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
+ ibmvfc_hard_reset_host(vhost);
+ return;
+ }
ibmvfc_gather_partition_info(vhost);
ibmvfc_set_login_info(vhost);
@@ -4122,11 +5303,11 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Sent NPIV login\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
-};
+}
/**
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
- * @vhost: ibmvfc host struct
+ * @evt: ibmvfc event struct
*
**/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
@@ -4138,7 +5319,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
- if (list_empty(&vhost->sent) &&
+ if (list_empty(&vhost->crq.sent) &&
vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
ibmvfc_init_host(vhost);
return;
@@ -4166,7 +5347,13 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
struct ibmvfc_npiv_logout_mad *mad;
struct ibmvfc_event *evt;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_reserved_event(&vhost->crq);
+ if (!evt) {
+ ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
+ ibmvfc_hard_reset_host(vhost);
+ return;
+ }
+
ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.npiv_logout;
@@ -4204,6 +5391,25 @@ static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_dev_logo_to_do - Is there target logout work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
+ tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* __ibmvfc_work_to_do - Is there task level work to do? (no locking)
* @vhost: ibmvfc host struct
*
@@ -4232,17 +5438,26 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
+ if (vhost->discovery_threads == disc_threads)
+ return 0;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ return 1;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
+ return 0;
+ return 1;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
- case IBMVFC_HOST_ACTION_TGT_DEL:
- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
- };
+ }
return 1;
}
@@ -4304,9 +5519,17 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
- del_timer_sync(&tgt->timer);
+ timer_delete_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+ tgt->rport = NULL;
+ tgt->init_retries = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ fc_remote_port_delete(rport);
+ return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
@@ -4324,8 +5547,6 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
rport->supported_classes |= FC_COS_CLASS2;
if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
- if (rport->rqst_q)
- blk_queue_max_segments(rport->rqst_q, 1);
} else
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -4341,6 +5562,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
struct ibmvfc_target *tgt;
unsigned long flags;
struct fc_rport *rport;
+ LIST_HEAD(purge);
int rc;
ibmvfc_log_ae(vhost, vhost->events_to_log);
@@ -4352,26 +5574,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
case IBMVFC_HOST_ACTION_RESET:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
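+ /* Splice off purged events under the host lock; complete them after dropping it */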
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
rc = ibmvfc_reset_crq(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc == H_CLOSED)
+ if (!rc || rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
+ /*
+ * The only action we could have changed to would have
+ * been reenable, in which case we skip the rest of
+ * this path and wait until we've done the reenable
+ * before sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_REENABLE:
- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
rc = ibmvfc_reenable_crq_queue(vhost);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
- if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
- dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
+ /*
+ * The only action we could have changed to would have
+ * been reset, in which case we skip the rest of this
+ * path and wait until we've done the reset before
+ * sending the crq init.
+ */
+ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
+ }
}
break;
case IBMVFC_HOST_ACTION_LOGO:
@@ -4406,6 +5651,18 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
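+ /* Kick off at most one rport logout at a time */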
+ if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
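+ /* Wait for outstanding rport logouts to complete before deleting rports */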
+ if (ibmvfc_dev_logo_to_do(vhost)) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return;
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
rport = tgt->rport;
@@ -4415,9 +5672,31 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
- del_timer_sync(&tgt->timer);
+ timer_delete_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ tgt->init_retries = 0;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+
+ /*
+ * If fast fail is enabled, we wait for it to fire and then clean up
+ * the old port, since we expect the fast fail timer to clean up the
+ * outstanding I/O faster than waiting for normal command timeouts.
+ * However, if fast fail is disabled, any I/O outstanding to the
+ * rport LUNs will stay outstanding indefinitely, since the EH handlers
+ * won't get invoked for I/Os timing out. If this is an NPIV failover
+ * scenario, the better alternative is to use the move login.
+ */
+ if (rport && rport->fast_io_fail_tmo == -1)
+ tgt->move_login = 1;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ return;
}
}
@@ -4470,7 +5749,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
- };
+ }
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
@@ -4506,6 +5785,74 @@ static int ibmvfc_work(void *data)
}
/**
+ * ibmvfc_alloc_queue - Allocate queue
+ * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue to allocate
+ * @fmt: queue format to allocate
+ *
+ * Returns:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue,
+ enum ibmvfc_msg_fmt fmt)
+{
+ struct device *dev = vhost->dev;
+ size_t fmt_size;
+
+ ENTER;
+ spin_lock_init(&queue->_lock);
+ queue->q_lock = &queue->_lock;
+
+ switch (fmt) {
+ case IBMVFC_CRQ_FMT:
+ fmt_size = sizeof(*queue->msgs.crq);
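+ /* Pool scsi_qdepth events for commands plus a reserved set for internal requests */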
+ queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+ queue->evt_depth = scsi_qdepth;
+ queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
+ break;
+ case IBMVFC_ASYNC_FMT:
+ fmt_size = sizeof(*queue->msgs.async);
+ break;
+ case IBMVFC_SUB_CRQ_FMT:
+ fmt_size = sizeof(*queue->msgs.scrq);
+ /* We need one extra event for Cancel Commands */
+ queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
+ queue->evt_depth = scsi_qdepth;
+ queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
+ break;
+ default:
+ dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
+ return -EINVAL;
+ }
+
+ queue->fmt = fmt;
+ if (ibmvfc_init_event_pool(vhost, queue)) {
+ dev_err(dev, "Couldn't initialize event pool.\n");
+ return -ENOMEM;
+ }
+
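+ /* A single zeroed page backs the queue; map it for bidirectional DMA */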
+ queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!queue->msgs.handle)
+ return -ENOMEM;
+
+ queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, queue->msg_token)) {
+ free_page((unsigned long)queue->msgs.handle);
+ queue->msgs.handle = NULL;
+ return -ENOMEM;
+ }
+
+ queue->cur = 0;
+ queue->size = PAGE_SIZE / fmt_size;
+
+ queue->vhost = vhost;
+ return 0;
+}
+
+/**
* ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
* @vhost: ibmvfc host struct
*
@@ -4520,21 +5867,12 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
int rc, retrc = -ENOMEM;
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
ENTER;
- crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
-
- if (!crq->msgs)
+ if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
return -ENOMEM;
- crq->size = PAGE_SIZE / sizeof(*crq->msgs);
- crq->msg_token = dma_map_single(dev, crq->msgs,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, crq->msg_token))
- goto map_failed;
-
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE);
@@ -4563,7 +5901,6 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
goto req_irq_failed;
}
- crq->cur = 0;
LEAVE;
return retrc;
@@ -4573,12 +5910,229 @@ req_irq_failed:
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
- dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
-map_failed:
- free_page((unsigned long)crq->msgs);
+ ibmvfc_free_queue(vhost, crq);
return retrc;
}
+static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels,
+ int index)
+{
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_queue *scrq = &channels->scrqs[index];
+ int rc = -ENOMEM;
+
+ ENTER;
+
+ rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
+ &scrq->cookie, &scrq->hw_irq);
+
+ /* H_CLOSED indicates successful register, but no CRQ partner */
+ if (rc && rc != H_CLOSED) {
+ dev_warn(dev, "Error registering sub-crq: %d\n", rc);
+ if (rc == H_PARAMETER)
+ dev_warn_once(dev, "Firmware may not support MQ\n");
+ goto reg_failed;
+ }
+
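+ /* Map the hypervisor-assigned hw IRQ into the Linux IRQ space */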
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (!scrq->irq) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
+ goto irq_failed;
+ }
+
+ switch (channels->protocol) {
+ case IBMVFC_PROTO_SCSI:
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
+ vdev->unit_address, index);
+ scrq->handler = ibmvfc_interrupt_mq;
+ break;
+ case IBMVFC_PROTO_NVME:
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
+ vdev->unit_address, index);
+ scrq->handler = ibmvfc_interrupt_mq;
+ break;
+ default:
+ dev_err(dev, "Unknown channel protocol (%d)\n",
+ channels->protocol);
+ goto irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
+ irq_dispose_mapping(scrq->irq);
+ goto irq_failed;
+ }
+
+ scrq->hwq_id = index;
+
+ LEAVE;
+ return 0;
+
+irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_failed:
+ LEAVE;
+ return rc;
+}
+
+static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels,
+ int index)
+{
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_queue *scrq = &channels->scrqs[index];
+ long rc;
+
+ ENTER;
+
+ free_irq(scrq->irq, scrq);
+ irq_dispose_mapping(scrq->irq);
+ scrq->irq = 0;
+
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
+ scrq->cookie);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc)
+ dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
+
+ /* Clean out the queue */
+ memset(scrq->msgs.crq, 0, PAGE_SIZE);
+ scrq->cur = 0;
+
+ LEAVE;
+}
+
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
+{
+ int i, j;
+
+ ENTER;
+ if (!vhost->mq_enabled || !channels->scrqs)
+ return;
+
+ for (i = 0; i < channels->max_queues; i++) {
+ if (ibmvfc_register_channel(vhost, channels, i)) {
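+ /* Unwind any channels that were already registered */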
+ for (j = i; j > 0; j--)
+ ibmvfc_deregister_channel(vhost, channels, j - 1);
+ vhost->do_enquiry = 0;
+ return;
+ }
+ }
+
+ LEAVE;
+}
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
+{
+ int i;
+
+ ENTER;
+ if (!vhost->mq_enabled || !channels->scrqs)
+ return;
+
+ for (i = 0; i < channels->max_queues; i++)
+ ibmvfc_deregister_channel(vhost, channels, i);
+
+ LEAVE;
+}
+
+static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
+{
+ struct ibmvfc_queue *scrq;
+ int i, j;
+ int rc = 0;
+
+ channels->scrqs = kcalloc(channels->max_queues,
+ sizeof(*channels->scrqs),
+ GFP_KERNEL);
+ if (!channels->scrqs)
+ return -ENOMEM;
+
+ for (i = 0; i < channels->max_queues; i++) {
+ scrq = &channels->scrqs[i];
+ rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
+ if (rc) {
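+ /* Free the queues allocated so far before bailing out */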
+ for (j = i; j > 0; j--) {
+ scrq = &channels->scrqs[j - 1];
+ ibmvfc_free_queue(vhost, scrq);
+ }
+ kfree(channels->scrqs);
+ channels->scrqs = NULL;
+ channels->active_queues = 0;
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+{
+ ENTER;
+ if (!vhost->mq_enabled)
+ return;
+
+ if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
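+ /* Fall back to single-queue operation if channel allocation fails */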
+ vhost->do_enquiry = 0;
+ vhost->mq_enabled = 0;
+ return;
+ }
+
+ ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
+
+ LEAVE;
+}
+
+static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
+ struct ibmvfc_channels *channels)
+{
+ struct ibmvfc_queue *scrq;
+ int i;
+
+ if (channels->scrqs) {
+ for (i = 0; i < channels->max_queues; i++) {
+ scrq = &channels->scrqs[i];
+ ibmvfc_free_queue(vhost, scrq);
+ }
+
+ kfree(channels->scrqs);
+ channels->scrqs = NULL;
+ channels->active_queues = 0;
+ }
+}
+
+static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+{
+ ENTER;
+ if (!vhost->scsi_scrqs.scrqs)
+ return;
+
+ ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
+
+ ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
+ LEAVE;
+}
+
+static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
+{
+ dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf,
+ channels->disc_buf_dma);
+}
+
/**
* ibmvfc_free_mem - Free memory for vhost
* @vhost: ibmvfc host struct
@@ -4588,22 +6142,36 @@ map_failed:
**/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct ibmvfc_queue *async_q = &vhost->async_crq;
ENTER;
mempool_destroy(vhost->tgt_pool);
kfree(vhost->trace);
- dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
- vhost->disc_buf_dma);
+ ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs);
dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
+ dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
+ vhost->channel_setup_buf, vhost->channel_setup_dma);
dma_pool_destroy(vhost->sg_pool);
- dma_unmap_single(vhost->dev, async_q->msg_token,
- async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
- free_page((unsigned long)async_q->msgs);
+ ibmvfc_free_queue(vhost, async_q);
LEAVE;
}
+static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
+{
+ channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets;
+ channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz,
+ &channels->disc_buf_dma, GFP_KERNEL);
+
+ if (!channels->disc_buf) {
+ dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n",
+ (channels->protocol == IBMVFC_PROTO_SCSI) ? "SCSI" : "NVMe");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
/**
* ibmvfc_alloc_mem - Allocate memory for vhost
* @vhost: ibmvfc host struct
@@ -4613,26 +6181,15 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
**/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct ibmvfc_queue *async_q = &vhost->async_crq;
struct device *dev = vhost->dev;
ENTER;
- async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
- if (!async_q->msgs) {
- dev_err(dev, "Couldn't allocate async queue.\n");
+ if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
+ dev_err(dev, "Couldn't allocate/map async queue.\n");
goto nomem;
}
- async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
- async_q->msg_token = dma_map_single(dev, async_q->msgs,
- async_q->size * sizeof(*async_q->msgs),
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, async_q->msg_token)) {
- dev_err(dev, "Failed to map async queue\n");
- goto free_async_crq;
- }
-
vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
SG_ALL * sizeof(struct srp_direct_buf),
sizeof(struct srp_direct_buf), 0);
@@ -4650,20 +6207,15 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_sg_pool;
}
- vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
- vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
- &vhost->disc_buf_dma, GFP_KERNEL);
-
- if (!vhost->disc_buf) {
- dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+ if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs))
goto free_login_buffer;
- }
vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+ atomic_set(&vhost->trace_index, -1);
if (!vhost->trace)
- goto free_disc_buffer;
+ goto free_scsi_disc_buffer;
vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
sizeof(struct ibmvfc_target));
@@ -4673,24 +6225,31 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_trace;
}
+ vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
+ &vhost->channel_setup_dma,
+ GFP_KERNEL);
+
+ if (!vhost->channel_setup_buf) {
+ dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
+ goto free_tgt_pool;
+ }
+
LEAVE;
return 0;
+free_tgt_pool:
+ mempool_destroy(vhost->tgt_pool);
free_trace:
kfree(vhost->trace);
-free_disc_buffer:
- dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
- vhost->disc_buf_dma);
+free_scsi_disc_buffer:
+ ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs);
free_login_buffer:
dma_free_coherent(dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
- dma_unmap_single(dev, async_q->msg_token,
- async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
-free_async_crq:
- free_page((unsigned long)async_q->msgs);
+ ibmvfc_free_queue(vhost, async_q);
nomem:
LEAVE;
return -ENOMEM;
@@ -4762,6 +6321,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct Scsi_Host *shost;
struct device *dev = &vdev->dev;
int rc = -ENOMEM;
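+ /* Never advertise more hw queues than online CPUs */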
+ unsigned int online_cpus = num_online_cpus();
+ unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);
ENTER;
shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
@@ -4771,23 +6332,32 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
shost->transportt = ibmvfc_transport_template;
- shost->can_queue = max_requests;
+ shost->can_queue = scsi_qdepth;
shost->max_lun = max_lun;
shost->max_id = max_targets;
- shost->max_sectors = IBMVFC_MAX_SECTORS;
+ shost->max_sectors = max_sectors;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
+ shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
vhost = shost_priv(shost);
- INIT_LIST_HEAD(&vhost->sent);
- INIT_LIST_HEAD(&vhost->free);
INIT_LIST_HEAD(&vhost->targets);
+ INIT_LIST_HEAD(&vhost->purge);
sprintf(vhost->name, IBMVFC_NAME);
vhost->host = shost;
vhost->dev = dev;
vhost->partition_number = -1;
vhost->log_level = log_level;
vhost->task_set = 1;
+
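+ /* Desired channels are bounded by the hw queue count and the module parameter */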
+ vhost->mq_enabled = mq_enabled;
+ vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
+ vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
+ vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI;
+ vhost->using_channels = 0;
+ vhost->do_enquiry = 1;
+ vhost->scan_timeout = 0;
+
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
@@ -4803,6 +6373,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (IS_ERR(vhost->work_thread)) {
dev_err(dev, "Couldn't create kernel thread: %ld\n",
PTR_ERR(vhost->work_thread));
+ rc = PTR_ERR(vhost->work_thread);
goto free_host_mem;
}
@@ -4811,13 +6382,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto kill_kthread;
}
- if ((rc = ibmvfc_init_event_pool(vhost))) {
- dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
- goto release_crq;
- }
-
if ((rc = scsi_add_host(shost, dev)))
- goto release_event_pool;
+ goto release_crq;
fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
@@ -4827,8 +6393,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto remove_shost;
}
- if (shost_to_fc_host(shost)->rqst_q)
- blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
+ ibmvfc_init_sub_crqs(vhost);
+
dev_set_drvdata(dev, vhost);
spin_lock(&ibmvfc_driver_lock);
list_add_tail(&vhost->queue, &ibmvfc_head);
@@ -4840,8 +6406,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
remove_shost:
scsi_remove_host(shost);
-release_event_pool:
- ibmvfc_free_event_pool(vhost);
release_crq:
ibmvfc_release_crq_queue(vhost);
kill_kthread:
@@ -4862,9 +6426,10 @@ out:
* Return value:
* 0
**/
-static int ibmvfc_remove(struct vio_dev *vdev)
+static void ibmvfc_remove(struct vio_dev *vdev)
{
struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+ LIST_HEAD(purge);
unsigned long flags;
ENTER;
@@ -4875,15 +6440,17 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_wait_while_resetting(vhost);
- ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
- ibmvfc_free_event_pool(vhost);
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
+ ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_release_crq_queue(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
@@ -4891,7 +6458,6 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_unlock(&ibmvfc_driver_lock);
scsi_host_put(vhost->host);
LEAVE;
- return 0;
}
/**
@@ -4925,11 +6491,13 @@ static int ibmvfc_resume(struct device *dev)
*/
static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
{
- unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+ unsigned long pool_dma;
+
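+ /* Size the IU pool for the maximum number of hw queues at full depth */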
+ pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}
-static struct vio_device_id ibmvfc_device_table[] = {
+static const struct vio_device_id ibmvfc_device_table[] = {
{"fcp", "IBM,vfc-client"},
{ "", "" }
};
@@ -4981,6 +6549,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
.get_starget_port_id = ibmvfc_get_starget_port_id,
.show_starget_port_id = 1,
+ .max_bsg_segments = 1,
.bsg_request = ibmvfc_bsg_request,
.bsg_timeout = ibmvfc_bsg_timeout,
};
@@ -4993,6 +6562,7 @@ static struct fc_function_template ibmvfc_transport_functions = {
**/
static int __init ibmvfc_module_init(void)
{
+ int min_max_sectors = PAGE_SIZE >> 9;
int rc;
if (!firmware_has_feature(FW_FEATURE_VIO))
@@ -5001,6 +6571,16 @@ static int __init ibmvfc_module_init(void)
printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+ /*
+ * Range check the max_sectors module parameter. The upper bound is
+ * implicitly checked since the parameter is a ushort.
+ */
+ if (max_sectors < min_max_sectors) {
+ printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n",
+ min_max_sectors);
+ max_sectors = min_max_sectors;
+ }
+
ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
if (!ibmvfc_transport_template)
return -ENOMEM;