Diffstat (limited to 'drivers/scsi/ibmvscsi/ibmvfc.c')
-rw-r--r-- | drivers/scsi/ibmvscsi/ibmvfc.c | 3139
1 file changed, 2352 insertions(+), 787 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 4e31caa21ddf..228daffb286d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1,24 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter * * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation * * Copyright (C) IBM Corporation, 2008 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * */ #include <linux/module.h> @@ -27,11 +13,13 @@ #include <linux/dmapool.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/stringify.h> +#include <linux/bsg-lib.h> #include <asm/firmware.h> #include <asm/irq.h> #include <asm/vio.h> @@ -46,12 +34,21 @@ static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT; static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT; -static unsigned int max_lun = IBMVFC_MAX_LUN; +static u64 max_lun = IBMVFC_MAX_LUN; static unsigned int max_targets = IBMVFC_MAX_TARGETS; static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; +static u16 max_sectors = IBMVFC_MAX_SECTORS; +static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH; static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; static unsigned int ibmvfc_debug = IBMVFC_DEBUG; static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; +static unsigned int cls3_error = IBMVFC_CLS3_ERROR; +static unsigned int mq_enabled = IBMVFC_MQ; +static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES; +static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS; +static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ; +static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M; + static LIST_HEAD(ibmvfc_head); static DEFINE_SPINLOCK(ibmvfc_driver_lock); static struct scsi_transport_template *ibmvfc_transport_template; @@ -61,6 +58,22 @@ MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(IBMVFC_DRIVER_VERSION); +module_param_named(mq, mq_enabled, uint, S_IRUGO); +MODULE_PARM_DESC(mq, "Enable multiqueue support. " + "[Default=" __stringify(IBMVFC_MQ) "]"); +module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO); +MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. " + "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]"); +module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO); +MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. " + "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]"); +module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO); +MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. 
" + "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]"); +module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO); +MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. " + "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]"); + module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. " "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]"); @@ -71,7 +84,13 @@ MODULE_PARM_DESC(default_timeout, module_param_named(max_requests, max_requests, uint, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. " "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]"); -module_param_named(max_lun, max_lun, uint, S_IRUGO); +module_param_named(max_sectors, max_sectors, ushort, S_IRUGO); +MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. " + "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]"); +module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO); +MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. " + "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]"); +module_param_named(max_lun, max_lun, ullong, S_IRUGO); MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. " "[Default=" __stringify(IBMVFC_MAX_LUN) "]"); module_param_named(max_targets, max_targets, uint, S_IRUGO); @@ -86,6 +105,9 @@ MODULE_PARM_DESC(debug, "Enable driver debug information. " module_param_named(log_level, log_level, uint, 0); MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); +module_param_named(cls3_error, cls3_error, uint, 0); +MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. " + "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]"); static const struct { u16 status; @@ -134,6 +156,7 @@ static const struct { { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, + { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." }, }; static void ibmvfc_npiv_login(struct ibmvfc_host *); @@ -141,9 +164,53 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); static void ibmvfc_tgt_query_target(struct ibmvfc_target *); static void ibmvfc_npiv_logout(struct ibmvfc_host *); +static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); +static void ibmvfc_tgt_move_login(struct ibmvfc_target *); + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *); +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *); static const char *unknown_error = "unknown error"; +static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba, + unsigned long length, unsigned long *cookie, + unsigned long *irq) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + long rc; + + rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length); + *cookie = retbuf[0]; + *irq = retbuf[1]; + + return rc; +} + +static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags) +{ + u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities); + + return (host_caps & cap_flags) ? 
1 : 0; +} + +static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost, + struct ibmvfc_cmd *vfc_cmd) +{ + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + return &vfc_cmd->v2.iu; + else + return &vfc_cmd->v1.iu; +} + +static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost, + struct ibmvfc_cmd *vfc_cmd) +{ + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + return &vfc_cmd->v2.rsp; + else + return &vfc_cmd->v1.rsp; +} + #ifdef CONFIG_SCSI_IBMVFC_TRACE /** * ibmvfc_trc_start - Log a start trace entry @@ -155,9 +222,11 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; struct ibmvfc_mad_common *mad = &evt->iu.mad_common; + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); struct ibmvfc_trace_entry *entry; + int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK; - entry = &vhost->trace[vhost->trace_index++]; + entry = &vhost->trace[index]; entry->evt = evt; entry->time = jiffies; entry->fmt = evt->crq.format; @@ -165,18 +234,18 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt) switch (entry->fmt) { case IBMVFC_CMD_FORMAT: - entry->op_code = vfc_cmd->iu.cdb[0]; - entry->scsi_id = vfc_cmd->tgt_scsi_id; - entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); - entry->tmf_flags = vfc_cmd->iu.tmf_flags; - entry->u.start.xfer_len = vfc_cmd->iu.xfer_len; + entry->op_code = iu->cdb[0]; + entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); + entry->lun = scsilun_to_int(&iu->lun); + entry->tmf_flags = iu->tmf_flags; + entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len); break; case IBMVFC_MAD_FORMAT: - entry->op_code = mad->opcode; + entry->op_code = be32_to_cpu(mad->opcode); break; default: break; - }; + } } /** @@ -189,8 +258,12 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common; - struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++]; + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + struct ibmvfc_trace_entry *entry; + int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK; + entry = &vhost->trace[index]; entry->evt = evt; entry->time = jiffies; entry->fmt = evt->crq.format; @@ -198,24 +271,24 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt) switch (entry->fmt) { case IBMVFC_CMD_FORMAT: - entry->op_code = vfc_cmd->iu.cdb[0]; - entry->scsi_id = vfc_cmd->tgt_scsi_id; - entry->lun = scsilun_to_int(&vfc_cmd->iu.lun); - entry->tmf_flags = vfc_cmd->iu.tmf_flags; - entry->u.end.status = vfc_cmd->status; - entry->u.end.error = vfc_cmd->error; - entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags; - entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code; - entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status; + entry->op_code = iu->cdb[0]; + entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); + entry->lun = scsilun_to_int(&iu->lun); + entry->tmf_flags = iu->tmf_flags; + entry->u.end.status = be16_to_cpu(vfc_cmd->status); + entry->u.end.error = be16_to_cpu(vfc_cmd->error); + entry->u.end.fcp_rsp_flags = rsp->flags; + entry->u.end.rsp_code = rsp->data.info.rsp_code; + entry->u.end.scsi_status = rsp->scsi_status; break; case IBMVFC_MAD_FORMAT: - entry->op_code = mad->opcode; - entry->u.end.status = mad->status; + entry->op_code = be32_to_cpu(mad->opcode); + 
entry->u.end.status = be16_to_cpu(mad->status); break; default: break; - }; + } } #else @@ -261,23 +334,24 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error) /** * ibmvfc_get_err_result - Find the scsi status to return for the fcp response + * @vhost: ibmvfc host struct * @vfc_cmd: ibmvfc command struct * * Return value: * SCSI result value to return for completed command **/ -static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) +static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd) { int err; - struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; - int fc_rsp_len = rsp->fcp_rsp_len; + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len); if ((rsp->flags & FCP_RSP_LEN_VALID) && ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || rsp->data.info.rsp_code)) return DID_ERROR << 16; - err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); + err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); if (err >= 0) return rsp->scsi_status | (cmd_status[err].result << 16); return rsp->scsi_status | (DID_ERROR << 16); @@ -421,22 +495,59 @@ static const char *ibmvfc_get_fc_type(u16 status) * @tgt: ibmvfc target struct * @action: action to perform * + * Returns: + * 0 if action changed / non-zero if not changed **/ -static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, +static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, enum ibmvfc_target_action action) { + int rc = -EINVAL; + switch (tgt->action) { + case IBMVFC_TGT_ACTION_LOGOUT_RPORT: + if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT || + action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT: + if (action == IBMVFC_TGT_ACTION_DEL_RPORT || + action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT: + if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) { + tgt->action = action; + rc = 0; + } + break; + case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT: + if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { + tgt->action = action; + rc = 0; + } + break; case IBMVFC_TGT_ACTION_DEL_RPORT: - if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) + if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) { tgt->action = action; + rc = 0; + } + break; case IBMVFC_TGT_ACTION_DELETED_RPORT: break; default: - if (action == IBMVFC_TGT_ACTION_DEL_RPORT) - tgt->add_rport = 0; tgt->action = action; + rc = 0; break; } + + if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT) + tgt->add_rport = 0; + + return rc; } /** @@ -459,7 +570,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost, default: vhost->state = state; break; - }; + } return rc; } @@ -495,14 +606,23 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, break; default: break; - }; + } break; case IBMVFC_HOST_ACTION_TGT_INIT: if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; + case IBMVFC_HOST_ACTION_REENABLE: + case IBMVFC_HOST_ACTION_RESET: + vhost->action = action; + break; case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_LOGO: + case IBMVFC_HOST_ACTION_QUERY_TGTS: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + case IBMVFC_HOST_ACTION_NONE: + default: switch (vhost->action) { case IBMVFC_HOST_ACTION_RESET: case IBMVFC_HOST_ACTION_REENABLE: @@ -510,18 +630,9 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, 
default: vhost->action = action; break; - }; - break; - case IBMVFC_HOST_ACTION_LOGO: - case IBMVFC_HOST_ACTION_QUERY_TGTS: - case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: - case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_RESET: - case IBMVFC_HOST_ACTION_REENABLE: - default: - vhost->action = action; + } break; - }; + } } /** @@ -533,7 +644,8 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, **/ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) { - if (vhost->action == IBMVFC_HOST_ACTION_NONE) { + if (vhost->action == IBMVFC_HOST_ACTION_NONE && + vhost->state == IBMVFC_ACTIVE) { if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); @@ -545,6 +657,19 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) } /** + * ibmvfc_del_tgt - Schedule cleanup and removal of the target + * @tgt: ibmvfc target struct + **/ +static void ibmvfc_del_tgt(struct ibmvfc_target *tgt) +{ + if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) { + tgt->job_step = ibmvfc_tgt_implicit_logout_and_del; + tgt->init_retries = 0; + } + wake_up(&tgt->vhost->work_wait_q); +} + +/** * ibmvfc_link_down - Handle a link down event from the adapter * @vhost: ibmvfc host struct * @state: ibmvfc host state to enter @@ -558,7 +683,7 @@ static void ibmvfc_link_down(struct ibmvfc_host *vhost, ENTER; scsi_block_requests(vhost->host); list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); ibmvfc_set_host_state(vhost, state); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL); vhost->events_to_log |= IBMVFC_AE_LINKDOWN; @@ -587,11 +712,16 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost) } if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { - memset(vhost->async_crq.msgs, 0, PAGE_SIZE); + memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE); vhost->async_crq.cur = 0; - list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (vhost->client_migrated) + tgt->need_login = 1; + else + ibmvfc_del_tgt(tgt); + } + scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -614,6 +744,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2) return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } +static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1, + u64 word2, u64 word3, u64 word4) +{ + struct vio_dev *vdev = to_vio_dev(vhost->dev); + + return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie, + word1, word2, word3, word4); +} + /** * ibmvfc_send_crq_init - Send a CRQ init message * @vhost: ibmvfc host struct @@ -641,6 +780,115 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost) } /** + * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host + * @vhost: ibmvfc host who owns the event pool + * @queue: ibmvfc queue struct + * + * Returns zero on success. 
+ **/ +static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue) +{ + int i; + struct ibmvfc_event_pool *pool = &queue->evt_pool; + + ENTER; + if (!queue->total_depth) + return 0; + + pool->size = queue->total_depth; + pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); + if (!pool->events) + return -ENOMEM; + + pool->iu_storage = dma_alloc_coherent(vhost->dev, + pool->size * sizeof(*pool->iu_storage), + &pool->iu_token, 0); + + if (!pool->iu_storage) { + kfree(pool->events); + return -ENOMEM; + } + + INIT_LIST_HEAD(&queue->sent); + INIT_LIST_HEAD(&queue->free); + queue->evt_free = queue->evt_depth; + queue->reserved_free = queue->reserved_depth; + spin_lock_init(&queue->l_lock); + + for (i = 0; i < pool->size; ++i) { + struct ibmvfc_event *evt = &pool->events[i]; + + /* + * evt->active states + * 1 = in flight + * 0 = being completed + * -1 = free/freed + */ + atomic_set(&evt->active, -1); + atomic_set(&evt->free, 1); + evt->crq.valid = 0x80; + evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); + evt->xfer_iu = pool->iu_storage + i; + evt->vhost = vhost; + evt->queue = queue; + evt->ext_list = NULL; + list_add_tail(&evt->queue_list, &queue->free); + } + + LEAVE; + return 0; +} + +/** + * ibmvfc_free_event_pool - Frees memory of the event pool of a host + * @vhost: ibmvfc host who owns the event pool + * @queue: ibmvfc queue struct + * + **/ +static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue) +{ + int i; + struct ibmvfc_event_pool *pool = &queue->evt_pool; + + ENTER; + for (i = 0; i < pool->size; ++i) { + list_del(&pool->events[i].queue_list); + BUG_ON(atomic_read(&pool->events[i].free) != 1); + if (pool->events[i].ext_list) + dma_pool_free(vhost->sg_pool, + pool->events[i].ext_list, + pool->events[i].ext_list_token); + } + + kfree(pool->events); + dma_free_coherent(vhost->dev, + pool->size * sizeof(*pool->iu_storage), + pool->iu_storage, pool->iu_token); + LEAVE; +} + +/** + * ibmvfc_free_queue - Deallocate queue + * @vhost: ibmvfc host struct + * @queue: ibmvfc queue struct + * + * Unmaps dma and deallocates page for messages + **/ +static void ibmvfc_free_queue(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue) +{ + struct device *dev = vhost->dev; + + dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); + free_page((unsigned long)queue->msgs.handle); + queue->msgs.handle = NULL; + + ibmvfc_free_event_pool(vhost, queue); +} + +/** * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ * @vhost: ibmvfc host struct * @@ -651,7 +899,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) { long rc = 0; struct vio_dev *vdev = to_vio_dev(vhost->dev); - struct ibmvfc_crq_queue *crq = &vhost->crq; + struct ibmvfc_queue *crq = &vhost->crq; ibmvfc_dbg(vhost, "Releasing CRQ\n"); free_irq(vdev->irq, vhost); @@ -664,8 +912,8 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) vhost->state = IBMVFC_NO_CRQ; vhost->logged_in = 0; - dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); - free_page((unsigned long)crq->msgs); + + ibmvfc_free_queue(vhost, crq); } /** @@ -679,6 +927,9 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) { int rc = 0; struct vio_dev *vdev = to_vio_dev(vhost->dev); + unsigned long flags; + + ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs); /* Re-enable the CRQ */ do { @@ -690,6 +941,15 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) if (rc) 
dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc); + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); + vhost->do_enquiry = 1; + vhost->using_channels = 0; + spin_unlock(vhost->crq.q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs); + return rc; } @@ -705,7 +965,9 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) int rc = 0; unsigned long flags; struct vio_dev *vdev = to_vio_dev(vhost->dev); - struct ibmvfc_crq_queue *crq = &vhost->crq; + struct ibmvfc_queue *crq = &vhost->crq; + + ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs); /* Close the CRQ */ do { @@ -715,12 +977,14 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); vhost->state = IBMVFC_NO_CRQ; vhost->logged_in = 0; - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + vhost->do_enquiry = 1; + vhost->using_channels = 0; /* Clean out the queue */ - memset(crq->msgs, 0, PAGE_SIZE); + memset(crq->msgs.crq, 0, PAGE_SIZE); crq->cur = 0; /* And re-open it again */ @@ -732,8 +996,12 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) dev_warn(vhost->dev, "Partner adapter not ready\n"); else if (rc != 0) dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc); + + spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs); + return rc; } @@ -763,12 +1031,24 @@ static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool, **/ static void ibmvfc_free_event(struct ibmvfc_event *evt) { - struct ibmvfc_host *vhost = evt->vhost; - struct ibmvfc_event_pool *pool = &vhost->pool; + struct ibmvfc_event_pool *pool = &evt->queue->evt_pool; + unsigned long flags; BUG_ON(!ibmvfc_valid_event(pool, evt)); BUG_ON(atomic_inc_return(&evt->free) != 1); - list_add_tail(&evt->queue, &vhost->free); + BUG_ON(atomic_dec_and_test(&evt->active)); + + spin_lock_irqsave(&evt->queue->l_lock, flags); + list_add_tail(&evt->queue_list, &evt->queue->free); + if (evt->reserved) { + evt->reserved = 0; + evt->queue->reserved_free++; + } else { + evt->queue->evt_free++; + } + if (evt->eh_comp) + complete(evt->eh_comp); + spin_unlock_irqrestore(&evt->queue->l_lock, flags); } /** @@ -784,16 +1064,31 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt) if (cmnd) { scsi_dma_unmap(cmnd); - cmnd->scsi_done(cmnd); + scsi_done(cmnd); } - if (evt->eh_comp) - complete(evt->eh_comp); - ibmvfc_free_event(evt); } /** + * ibmvfc_complete_purge - Complete failed command list + * @purge_list: list head of failed commands + * + * This function runs completions on commands to fail as a result of a + * host reset or platform migration. + **/ +static void ibmvfc_complete_purge(struct list_head *purge_list) +{ + struct ibmvfc_event *evt, *pos; + + list_for_each_entry_safe(evt, pos, purge_list, queue_list) { + list_del(&evt->queue_list); + ibmvfc_trc_end(evt); + evt->done(evt); + } +} + +/** * ibmvfc_fail_request - Fail request with specified error code * @evt: ibmvfc event struct * @error_code: error code to fail request with @@ -803,16 +1098,19 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt) **/ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) { + /* + * Anything we are failing should still be active. 
Otherwise, it + * implies we already got a response for the command and are doing + * something bad like double completing it. + */ + BUG_ON(!atomic_dec_and_test(&evt->active)); if (evt->cmnd) { evt->cmnd->result = (error_code << 16); evt->done = ibmvfc_scsi_eh_done; } else - evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED; + evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); - list_del(&evt->queue); - del_timer(&evt->timer); - ibmvfc_trc_end(evt); - evt->done(evt); + timer_delete(&evt->timer); } /** @@ -826,10 +1124,30 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code) static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) { struct ibmvfc_event *evt, *pos; + struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs; + unsigned long flags; + int hwqs = 0; + int i; + + if (vhost->using_channels) + hwqs = vhost->scsi_scrqs.active_queues; ibmvfc_dbg(vhost, "Purging all requests\n"); - list_for_each_entry_safe(evt, pos, &vhost->sent, queue) + spin_lock_irqsave(&vhost->crq.l_lock, flags); + list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list) ibmvfc_fail_request(evt, error_code); + list_splice_init(&vhost->crq.sent, &vhost->purge); + spin_unlock_irqrestore(&vhost->crq.l_lock, flags); + + for (i = 0; i < hwqs; i++) { + spin_lock_irqsave(queues[i].q_lock, flags); + spin_lock(&queues[i].l_lock); + list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list) + ibmvfc_fail_request(evt, error_code); + list_splice_init(&queues[i].sent, &vhost->purge); + spin_unlock(&queues[i].l_lock); + spin_unlock_irqrestore(queues[i].q_lock, flags); + } } /** @@ -955,7 +1273,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost) spin_lock_irqsave(shost->host_lock, flags); if (vhost->state == IBMVFC_ACTIVE) { - switch (vhost->login_buf->resp.link_speed / 100) { + switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) { case 1: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; @@ -976,7 +1294,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost) break; default: ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n", - vhost->login_buf->resp.link_speed / 100); + be64_to_cpu(vhost->login_buf->resp.link_speed) / 100); fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } @@ -1133,6 +1451,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost) /** * ibmvfc_gather_partition_info - Gather info about the LPAR + * @vhost: ibmvfc host struct * * Return value: * none @@ -1149,7 +1468,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) name = of_get_property(rootdn, "ibm,partition-name", NULL); if (name) - strncpy(vhost->partition_name, name, sizeof(vhost->partition_name)); + strscpy(vhost->partition_name, name, sizeof(vhost->partition_name)); num = of_get_property(rootdn, "ibm,partition-no", NULL); if (num) vhost->partition_number = *num; @@ -1166,118 +1485,98 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost) static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) { struct ibmvfc_npiv_login *login_info = &vhost->login_info; + struct ibmvfc_queue *async_crq = &vhost->async_crq; struct device_node *of_node = vhost->dev->of_node; const char *location; + u16 max_cmds; + + max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ; + if (mq_enabled) + max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) * + vhost->scsi_scrqs.desired_queues; memset(login_info, 0, sizeof(*login_info)); - login_info->ostype = IBMVFC_OS_LINUX; - login_info->max_dma_len = 
IBMVFC_MAX_SECTORS << 9; - login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu); - login_info->max_response = sizeof(struct ibmvfc_fcp_rsp); - login_info->partition_num = vhost->partition_number; - login_info->vfc_frame_version = 1; - login_info->fcp_version = 3; - login_info->flags = IBMVFC_FLUSH_ON_HALT; + login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX); + login_info->max_dma_len = cpu_to_be64(max_sectors << 9); + login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu)); + login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp)); + login_info->partition_num = cpu_to_be32(vhost->partition_number); + login_info->vfc_frame_version = cpu_to_be32(1); + login_info->fcp_version = cpu_to_be16(3); + login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT); if (vhost->client_migrated) - login_info->flags |= IBMVFC_CLIENT_MIGRATED; - - login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; - login_info->capabilities = IBMVFC_CAN_MIGRATE; - login_info->async.va = vhost->async_crq.msg_token; - login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs); - strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); - strncpy(login_info->device_name, - dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME); - - location = of_get_property(of_node, "ibm,loc-code", NULL); - location = location ? location : dev_name(vhost->dev); - strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME); -} - -/** - * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host - * @vhost: ibmvfc host who owns the event pool - * - * Returns zero on success. - **/ -static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost) -{ - int i; - struct ibmvfc_event_pool *pool = &vhost->pool; + login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED); - ENTER; - pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ; - pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL); - if (!pool->events) - return -ENOMEM; + login_info->max_cmds = cpu_to_be32(max_cmds); + login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN); - pool->iu_storage = dma_alloc_coherent(vhost->dev, - pool->size * sizeof(*pool->iu_storage), - &pool->iu_token, 0); + if (vhost->mq_enabled || vhost->using_channels) + login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS); - if (!pool->iu_storage) { - kfree(pool->events); - return -ENOMEM; - } + login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token); + login_info->async.len = cpu_to_be32(async_crq->size * + sizeof(*async_crq->msgs.async)); + strscpy(login_info->partition_name, vhost->partition_name, + sizeof(login_info->partition_name)); - for (i = 0; i < pool->size; ++i) { - struct ibmvfc_event *evt = &pool->events[i]; - atomic_set(&evt->free, 1); - evt->crq.valid = 0x80; - evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i); - evt->xfer_iu = pool->iu_storage + i; - evt->vhost = vhost; - evt->ext_list = NULL; - list_add_tail(&evt->queue, &vhost->free); - } + strscpy(login_info->device_name, + dev_name(&vhost->host->shost_gendev), sizeof(login_info->device_name)); - LEAVE; - return 0; + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? 
location : dev_name(vhost->dev); + strscpy(login_info->drc_name, location, sizeof(login_info->drc_name)); } /** - * ibmvfc_free_event_pool - Frees memory of the event pool of a host - * @vhost: ibmvfc host who owns the event pool + * __ibmvfc_get_event - Gets the next free event in pool + * @queue: ibmvfc queue struct + * @reserved: event is for a reserved management command * + * Returns a free event from the pool. **/ -static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost) +static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved) { - int i; - struct ibmvfc_event_pool *pool = &vhost->pool; + struct ibmvfc_event *evt = NULL; + unsigned long flags; - ENTER; - for (i = 0; i < pool->size; ++i) { - list_del(&pool->events[i].queue); - BUG_ON(atomic_read(&pool->events[i].free) != 1); - if (pool->events[i].ext_list) - dma_pool_free(vhost->sg_pool, - pool->events[i].ext_list, - pool->events[i].ext_list_token); + spin_lock_irqsave(&queue->l_lock, flags); + if (reserved && queue->reserved_free) { + evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list); + evt->reserved = 1; + queue->reserved_free--; + } else if (queue->evt_free) { + evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list); + queue->evt_free--; + } else { + goto out; } - kfree(pool->events); - dma_free_coherent(vhost->dev, - pool->size * sizeof(*pool->iu_storage), - pool->iu_storage, pool->iu_token); - LEAVE; + atomic_set(&evt->free, 0); + list_del(&evt->queue_list); +out: + spin_unlock_irqrestore(&queue->l_lock, flags); + return evt; } +#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0) +#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1) + /** - * ibmvfc_get_event - Gets the next free event in pool - * @vhost: ibmvfc host struct + * ibmvfc_locked_done - Calls evt completion with host_lock held + * @evt: ibmvfc evt to complete * - * Returns a free event from the pool. + * All non-scsi command completion callbacks have the expectation that the + * host_lock is held. This callback is used by ibmvfc_init_event to wrap a + * MAD evt with the host_lock. 
**/ -static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost) +static void ibmvfc_locked_done(struct ibmvfc_event *evt) { - struct ibmvfc_event *evt; + unsigned long flags; - BUG_ON(list_empty(&vhost->free)); - evt = list_entry(vhost->free.next, struct ibmvfc_event, queue); - atomic_set(&evt->free, 0); - list_del(&evt->queue); - return evt; + spin_lock_irqsave(evt->vhost->host->host_lock, flags); + evt->_done(evt); + spin_unlock_irqrestore(evt->vhost->host->host_lock, flags); } /** @@ -1292,9 +1591,15 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt, { evt->cmnd = NULL; evt->sync_iu = NULL; - evt->crq.format = format; - evt->done = done; evt->eh_comp = NULL; + evt->crq.format = format; + if (format == IBMVFC_CMD_FORMAT) + evt->done = done; + else { + evt->_done = done; + evt->done = ibmvfc_locked_done; + } + evt->hwq = 0; } /** @@ -1310,15 +1615,15 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, struct scatterlist *sg; scsi_for_each_sg(scmd, sg, nseg, i) { - md[i].va = sg_dma_address(sg); - md[i].len = sg_dma_len(sg); + md[i].va = cpu_to_be64(sg_dma_address(sg)); + md[i].len = cpu_to_be32(sg_dma_len(sg)); md[i].key = 0; } } /** - * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields - * @scmd: Scsi_Cmnd with the scatterlist + * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields + * @scmd: struct scsi_cmnd with the scatterlist * @evt: ibmvfc event struct * @vfc_cmd: vfc_cmd that contains the memory descriptor * @dev: device for which to map dma memory @@ -1334,10 +1639,14 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, int sg_mapped; struct srp_direct_buf *data = &vfc_cmd->ioba; struct ibmvfc_host *vhost = dev_get_drvdata(dev); + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd); + + if (cls3_error) + vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR); sg_mapped = scsi_dma_map(scmd); if (!sg_mapped) { - vfc_cmd->flags |= IBMVFC_NO_MEM_DESC; + vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC); return 0; } else if (unlikely(sg_mapped < 0)) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) @@ -1346,11 +1655,11 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, } if (scmd->sc_data_direction == DMA_TO_DEVICE) { - vfc_cmd->flags |= IBMVFC_WRITE; - vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA; + vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE); + iu->add_cdb_len |= IBMVFC_WRDATA; } else { - vfc_cmd->flags |= IBMVFC_READ; - vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA; + vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ); + iu->add_cdb_len |= IBMVFC_RDDATA; } if (sg_mapped == 1) { @@ -1358,7 +1667,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, return 0; } - vfc_cmd->flags |= IBMVFC_SCATTERLIST; + vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST); if (!evt->ext_list) { evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC, @@ -1374,20 +1683,21 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list); - data->va = evt->ext_list_token; - data->len = sg_mapped * sizeof(struct srp_direct_buf); + data->va = cpu_to_be64(evt->ext_list_token); + data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf)); data->key = 0; return 0; } /** * ibmvfc_timeout - Internal command timeout handler - * @evt: struct ibmvfc_event that timed out + * @t: struct ibmvfc_event that timed out * * Called when an internally generated command times out **/ -static void ibmvfc_timeout(struct ibmvfc_event *evt) +static void 
ibmvfc_timeout(struct timer_list *t) { + struct ibmvfc_event *evt = timer_container_of(evt, t, timer); struct ibmvfc_host *vhost = evt->vhost; dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt); ibmvfc_reset_host(vhost); @@ -1404,33 +1714,47 @@ static void ibmvfc_timeout(struct ibmvfc_event *evt) static int ibmvfc_send_event(struct ibmvfc_event *evt, struct ibmvfc_host *vhost, unsigned long timeout) { - u64 *crq_as_u64 = (u64 *) &evt->crq; + __be64 *crq_as_u64 = (__be64 *) &evt->crq; + unsigned long flags; int rc; /* Copy the IU into the transfer area */ *evt->xfer_iu = evt->iu; if (evt->crq.format == IBMVFC_CMD_FORMAT) - evt->xfer_iu->cmd.tag = (u64)evt; + evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt); else if (evt->crq.format == IBMVFC_MAD_FORMAT) - evt->xfer_iu->mad_common.tag = (u64)evt; + evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt); else BUG(); - list_add_tail(&evt->queue, &vhost->sent); - init_timer(&evt->timer); + timer_setup(&evt->timer, ibmvfc_timeout, 0); if (timeout) { - evt->timer.data = (unsigned long) evt; evt->timer.expires = jiffies + (timeout * HZ); - evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout; add_timer(&evt->timer); } + spin_lock_irqsave(&evt->queue->l_lock, flags); + list_add_tail(&evt->queue_list, &evt->queue->sent); + atomic_set(&evt->active, 1); + mb(); - if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) { - list_del(&evt->queue); - del_timer(&evt->timer); + if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT) + rc = ibmvfc_send_sub_crq(vhost, + evt->queue->vios_cookie, + be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1]), + 0, 0); + else + rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + if (rc) { + atomic_set(&evt->active, 0); + list_del(&evt->queue_list); + spin_unlock_irqrestore(&evt->queue->l_lock, flags); + timer_delete(&evt->timer); /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. 
* Firmware will send a CRQ with a transport event (0xFF) to @@ -1451,11 +1775,13 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, evt->cmnd->result = DID_ERROR << 16; evt->done = ibmvfc_scsi_eh_done; } else - evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR; + evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR); evt->done(evt); - } else + } else { + spin_unlock_irqrestore(&evt->queue->l_lock, flags); ibmvfc_trc_start(evt); + } return 0; } @@ -1469,10 +1795,10 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) { struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; struct ibmvfc_host *vhost = evt->vhost; - struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); struct scsi_cmnd *cmnd = evt->cmnd; const char *err = unknown_error; - int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error); + int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error)); int logerr = 0; int rsp_code = 0; @@ -1487,9 +1813,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) if (rsp->flags & FCP_RSP_LEN_VALID) rsp_code = rsp->data.info.rsp_code; - scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " + scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) " "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", - cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, + cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error), rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); } @@ -1503,15 +1829,18 @@ static void ibmvfc_relogin(struct scsi_device *sdev) struct ibmvfc_host *vhost = shost_priv(sdev->host); struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct ibmvfc_target *tgt; + unsigned long flags; + spin_lock_irqsave(vhost->host->host_lock, flags); list_for_each_entry(tgt, &vhost->targets, queue) { if (rport == tgt->rport) { - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); break; } } ibmvfc_reinit_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); } /** @@ -1523,29 +1852,30 @@ static void ibmvfc_relogin(struct scsi_device *sdev) static void ibmvfc_scsi_done(struct ibmvfc_event *evt) { struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd; - struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp; + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd); struct scsi_cmnd *cmnd = evt->cmnd; u32 rsp_len = 0; - u32 sense_len = rsp->fcp_sense_len; + u32 sense_len = be32_to_cpu(rsp->fcp_sense_len); if (cmnd) { - if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID) - scsi_set_resid(cmnd, vfc_cmd->adapter_resid); + if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID) + scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid)); else if (rsp->flags & FCP_RESID_UNDER) - scsi_set_resid(cmnd, rsp->fcp_resid); + scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid)); else scsi_set_resid(cmnd, 0); if (vfc_cmd->status) { - cmnd->result = ibmvfc_get_err_result(vfc_cmd); + cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd); if (rsp->flags & FCP_RSP_LEN_VALID) - rsp_len = rsp->fcp_rsp_len; + rsp_len = be32_to_cpu(rsp->fcp_rsp_len); if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE) sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len; if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); - if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) + if 
((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) && + (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED)) ibmvfc_relogin(cmnd->device); if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) @@ -1559,12 +1889,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) cmnd->result = (DID_ERROR << 16); scsi_dma_unmap(cmnd); - cmnd->scsi_done(cmnd); + scsi_done(cmnd); } - if (evt->eh_comp) - complete(evt->eh_comp); - ibmvfc_free_event(evt); } @@ -1593,69 +1920,95 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) case IBMVFC_ACTIVE: result = 0; break; - }; + } return result; } +static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd; + struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd); + size_t offset; + + memset(vfc_cmd, 0, sizeof(*vfc_cmd)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + offset = offsetof(struct ibmvfc_cmd, v2.rsp); + vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name); + } else + offset = offsetof(struct ibmvfc_cmd, v1.rsp); + vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset); + vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp)); + vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE); + vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu)); + vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp)); + vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); + vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id); + int_to_scsilun(sdev->lun, &iu->lun); + + return vfc_cmd; +} + /** * ibmvfc_queuecommand - The queuecommand function of the scsi template + * @shost: scsi host struct * @cmnd: struct scsi_cmnd to be executed - * @done: Callback function to be called when cmnd is completed * * Returns: * 0 on success / other on failure **/ -static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, - void (*done) (struct scsi_cmnd *)) +static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) { - struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); + struct ibmvfc_host *vhost = shost_priv(shost); struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); struct ibmvfc_cmd *vfc_cmd; + struct ibmvfc_fcp_cmd_iu *iu; struct ibmvfc_event *evt; - u8 tag[2]; + u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); + u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq); + u16 scsi_channel; int rc; if (unlikely((rc = fc_remote_port_chkready(rport))) || unlikely((rc = ibmvfc_host_chkready(vhost)))) { cmnd->result = rc; - done(cmnd); + scsi_done(cmnd); return 0; } cmnd->result = (DID_OK << 16); - evt = ibmvfc_get_event(vhost); + if (vhost->using_channels) { + scsi_channel = hwq % vhost->scsi_scrqs.active_queues; + evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]); + if (!evt) + return SCSI_MLQUEUE_HOST_BUSY; + + evt->hwq = hwq % vhost->scsi_scrqs.active_queues; + } else { + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) + return SCSI_MLQUEUE_HOST_BUSY; + } + ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT); evt->cmnd = cmnd; - cmnd->scsi_done = done; - vfc_cmd = &evt->iu.cmd; - memset(vfc_cmd, 0, sizeof(*vfc_cmd)); - vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); - vfc_cmd->resp.len = sizeof(vfc_cmd->rsp); - vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE; - 
vfc_cmd->payload_len = sizeof(vfc_cmd->iu); - vfc_cmd->resp_len = sizeof(vfc_cmd->rsp); - vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata; - vfc_cmd->tgt_scsi_id = rport->port_id; - vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd); - int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); - memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); - - if (scsi_populate_tag_msg(cmnd, tag)) { - vfc_cmd->task_tag = tag[1]; - switch (tag[0]) { - case MSG_SIMPLE_TAG: - vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK; - break; - case MSG_HEAD_TAG: - vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE; - break; - case MSG_ORDERED_TAG: - vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK; - break; - }; + + vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device); + iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd); + + iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd)); + memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); + + if (cmnd->flags & SCMD_TAGGED) { + vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag); + iu->pri_task_attr = IBMVFC_SIMPLE_TASK; } + vfc_cmd->correlation = cpu_to_be64((u64)evt); + if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) return ibmvfc_send_event(evt, vhost, 0); @@ -1668,12 +2021,10 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, "Failed to map DMA buffer for command. rc=%d\n", rc); cmnd->result = DID_ERROR << 16; - done(cmnd); + scsi_done(cmnd); return 0; } -static DEF_SCSI_QCMD(ibmvfc_queuecommand) - /** * ibmvfc_sync_completion - Signal that a synchronous command has completed * @evt: ibmvfc event struct @@ -1704,14 +2055,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt) /** * ibmvfc_bsg_timeout - Handle a BSG timeout - * @job: struct fc_bsg_job that timed out + * @job: struct bsg_job that timed out * * Returns: * 0 on success / other on failure **/ -static int ibmvfc_bsg_timeout(struct fc_bsg_job *job) +static int ibmvfc_bsg_timeout(struct bsg_job *job) { - struct ibmvfc_host *vhost = shost_priv(job->shost); + struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); unsigned long port_id = (unsigned long)job->dd_data; struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; @@ -1727,17 +2078,22 @@ static int ibmvfc_bsg_timeout(struct fc_bsg_job *job) } vhost->aborting_passthru = 1; - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT); tmf = &evt->iu.tmf; memset(tmf, 0, sizeof(*tmf)); - tmf->common.version = 1; - tmf->common.opcode = IBMVFC_TMF_MAD; - tmf->common.length = sizeof(*tmf); - tmf->scsi_id = port_id; - tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; - tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY; + tmf->common.version = cpu_to_be32(1); + tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); + tmf->common.length = cpu_to_be16(sizeof(*tmf)); + tmf->scsi_id = cpu_to_be64(port_id); + tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); + tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY); rc = ibmvfc_send_event(evt, vhost, default_timeout); if (rc != 0) { @@ -1785,14 +2141,18 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id) if (unlikely((rc = ibmvfc_host_chkready(vhost)))) goto unlock_out; - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + rc = -ENOMEM; + goto unlock_out; + } ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); plogi = 
&evt->iu.plogi; memset(plogi, 0, sizeof(*plogi)); - plogi->common.version = 1; - plogi->common.opcode = IBMVFC_PORT_LOGIN; - plogi->common.length = sizeof(*plogi); - plogi->scsi_id = port_id; + plogi->common.version = cpu_to_be32(1); + plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); + plogi->common.length = cpu_to_be16(sizeof(*plogi)); + plogi->scsi_id = cpu_to_be64(port_id); evt->sync_iu = &rsp_iu; init_completion(&evt->comp); @@ -1817,47 +2177,51 @@ unlock_out: /** * ibmvfc_bsg_request - Handle a BSG request - * @job: struct fc_bsg_job to be executed + * @job: struct bsg_job to be executed * * Returns: * 0 on success / other on failure **/ -static int ibmvfc_bsg_request(struct fc_bsg_job *job) +static int ibmvfc_bsg_request(struct bsg_job *job) { - struct ibmvfc_host *vhost = shost_priv(job->shost); - struct fc_rport *rport = job->rport; + struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job)); + struct fc_rport *rport = fc_bsg_to_rport(job); struct ibmvfc_passthru_mad *mad; struct ibmvfc_event *evt; union ibmvfc_iu rsp_iu; unsigned long flags, port_id = -1; - unsigned int code = job->request->msgcode; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + unsigned int code = bsg_request->msgcode; int rc = 0, req_seg, rsp_seg, issue_login = 0; u32 fc_flags, rsp_len; ENTER; - job->reply->reply_payload_rcv_len = 0; + bsg_reply->reply_payload_rcv_len = 0; if (rport) port_id = rport->port_id; switch (code) { case FC_BSG_HST_ELS_NOLOGIN: - port_id = (job->request->rqst_data.h_els.port_id[0] << 16) | - (job->request->rqst_data.h_els.port_id[1] << 8) | - job->request->rqst_data.h_els.port_id[2]; + port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | + (bsg_request->rqst_data.h_els.port_id[1] << 8) | + bsg_request->rqst_data.h_els.port_id[2]; + fallthrough; case FC_BSG_RPT_ELS: fc_flags = IBMVFC_FC_ELS; break; case FC_BSG_HST_CT: issue_login = 1; - port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) | - (job->request->rqst_data.h_ct.port_id[1] << 8) | - job->request->rqst_data.h_ct.port_id[2]; + port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | + (bsg_request->rqst_data.h_ct.port_id[1] << 8) | + bsg_request->rqst_data.h_ct.port_id[2]; + fallthrough; case FC_BSG_RPT_CT: fc_flags = IBMVFC_FC_CT_IU; break; default: return -ENOTSUPP; - }; + } if (port_id == -1) return -EINVAL; @@ -1899,31 +2263,36 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job) goto out; } - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + rc = -ENOMEM; + goto out; + } ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); mad = &evt->iu.passthru; memset(mad, 0, sizeof(*mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_PASSTHRU; - mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); - - mad->cmd_ioba.va = (u64)evt->crq.ioba + - offsetof(struct ibmvfc_passthru_mad, iu); - mad->cmd_ioba.len = sizeof(mad->iu); - - mad->iu.cmd_len = job->request_payload.payload_len; - mad->iu.rsp_len = job->reply_payload.payload_len; - mad->iu.flags = fc_flags; - mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY; - - mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list); - mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list); - mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list); - mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list); - mad->iu.scsi_id = port_id; - mad->iu.tag = (u64)evt; - 
rsp_len = mad->iu.rsp.len; + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); + mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); + + mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + + offsetof(struct ibmvfc_passthru_mad, iu)); + mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); + + mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len); + mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len); + mad->iu.flags = cpu_to_be32(fc_flags); + mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY); + + mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list)); + mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list)); + mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list)); + mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list)); + mad->iu.scsi_id = cpu_to_be64(port_id); + mad->iu.tag = cpu_to_be64((u64)evt); + rsp_len = be32_to_cpu(mad->iu.rsp.len); evt->sync_iu = &rsp_iu; init_completion(&evt->comp); @@ -1940,13 +2309,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job) if (rsp_iu.passthru.common.status) rc = -EIO; else - job->reply->reply_payload_rcv_len = rsp_len; + bsg_reply->reply_payload_rcv_len = rsp_len; spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); - job->reply->result = rc; - job->job_done(job); + bsg_reply->result = rc; + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); rc = 0; out: dma_unmap_sg(vhost->dev, job->request_payload.sg_list, @@ -1974,28 +2344,32 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) struct ibmvfc_cmd *tmf; struct ibmvfc_event *evt = NULL; union ibmvfc_iu rsp_iu; - struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; + struct ibmvfc_fcp_cmd_iu *iu; + struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd); int rsp_rc = -EBUSY; unsigned long flags; int rsp_code = 0; spin_lock_irqsave(vhost->host->host_lock, flags); if (vhost->state == IBMVFC_ACTIVE) { - evt = ibmvfc_get_event(vhost); + if (vhost->using_channels) + evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]); + else + evt = ibmvfc_get_event(&vhost->crq); + + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); + tmf = ibmvfc_init_vfc_cmd(evt, sdev); + iu = ibmvfc_get_fcp_iu(vhost, tmf); - tmf = &evt->iu.cmd; - memset(tmf, 0, sizeof(*tmf)); - tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); - tmf->resp.len = sizeof(tmf->rsp); - tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; - tmf->payload_len = sizeof(tmf->iu); - tmf->resp_len = sizeof(tmf->rsp); - tmf->cancel_key = (unsigned long)sdev->hostdata; - tmf->tgt_scsi_id = rport->port_id; - int_to_scsilun(sdev->lun, &tmf->iu.lun); - tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); - tmf->iu.tmf_flags = type; + tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + tmf->target_wwpn = cpu_to_be64(rport->port_name); + iu->tmf_flags = type; evt->sync_iu = &rsp_iu; init_completion(&evt->comp); @@ -2013,16 +2387,16 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) wait_for_completion(&evt->comp); if (rsp_iu.cmd.status) - rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); + rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd); if 
(rsp_code) { if (fc_rsp->flags & FCP_RSP_LEN_VALID) rsp_code = fc_rsp->data.info.rsp_code; sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " - "flags: %x fcp_rsp: %x, scsi_status: %x\n", - desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), - rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, + ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), + be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, fc_rsp->scsi_status); rsp_rc = -EIO; } else @@ -2037,7 +2411,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc) /** * ibmvfc_match_rport - Match function for specified remote port * @evt: ibmvfc event struct - * @device: device to match (rport) + * @rport: device to match * * Returns: * 1 if event matches rport / 0 if event does not match rport @@ -2085,6 +2459,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device) } /** + * ibmvfc_event_is_free - Check if event is free or not + * @evt: ibmvfc event struct + * + * Returns: + * true / false + **/ +static bool ibmvfc_event_is_free(struct ibmvfc_event *evt) +{ + struct ibmvfc_event *loop_evt; + + list_for_each_entry(loop_evt, &evt->queue->free, queue_list) + if (loop_evt == evt) + return true; + + return false; +} + +/** * ibmvfc_wait_for_ops - Wait for ops to complete * @vhost: ibmvfc host struct * @device: device to match (starget or sdev) @@ -2098,19 +2490,35 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, { struct ibmvfc_event *evt; DECLARE_COMPLETION_ONSTACK(comp); - int wait; + int wait, i, q_index, q_size; unsigned long flags; signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; + struct ibmvfc_queue *queues; ENTER; + if (vhost->mq_enabled && vhost->using_channels) { + queues = vhost->scsi_scrqs.scrqs; + q_size = vhost->scsi_scrqs.active_queues; + } else { + queues = &vhost->crq; + q_size = 1; + } + do { wait = 0; spin_lock_irqsave(vhost->host->host_lock, flags); - list_for_each_entry(evt, &vhost->sent, queue) { - if (match(evt, device)) { - evt->eh_comp = ∁ - wait++; + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = ∁ + wait++; + } + } } + spin_unlock(&queues[q_index].l_lock); } spin_unlock_irqrestore(vhost->host->host_lock, flags); @@ -2120,11 +2528,18 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, if (!timeout) { wait = 0; spin_lock_irqsave(vhost->host->host_lock, flags); - list_for_each_entry(evt, &vhost->sent, queue) { - if (match(evt, device)) { - evt->eh_comp = NULL; - wait++; + for (q_index = 0; q_index < q_size; q_index++) { + spin_lock(&queues[q_index].l_lock); + for (i = 0; i < queues[q_index].evt_pool.size; i++) { + evt = &queues[q_index].evt_pool.events[i]; + if (!ibmvfc_event_is_free(evt)) { + if (match(evt, device)) { + evt->eh_comp = NULL; + wait++; + } + } } + spin_unlock(&queues[q_index].l_lock); } spin_unlock_irqrestore(vhost->host->host_lock, flags); if (wait) @@ -2139,23 +2554,130 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device, return SUCCESS; } -/** - * ibmvfc_cancel_all - Cancel all outstanding commands to the device - * @sdev: scsi device to cancel commands - * @type: type of error recovery being performed - * - * 
This sends a cancel to the VIOS for the specified device. This does - * NOT send any abort to the actual device. That must be done separately. - * - * Returns: - * 0 on success / other on failure - **/ -static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) +static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue, + struct scsi_device *sdev, + int type) { struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); struct fc_rport *rport = starget_to_rport(starget); + struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; + + evt = ibmvfc_get_reserved_event(queue); + if (!evt) + return NULL; + ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); + + tmf = &evt->iu.tmf; + memset(tmf, 0, sizeof(*tmf)); + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + tmf->common.version = cpu_to_be32(2); + tmf->target_wwpn = cpu_to_be64(rport->port_name); + } else { + tmf->common.version = cpu_to_be32(1); + } + tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); + tmf->common.length = cpu_to_be16(sizeof(*tmf)); + tmf->scsi_id = cpu_to_be64(rport->port_id); + int_to_scsilun(sdev->lun, &tmf->lun); + if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID)); + else + tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID)); + tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata); + tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata); + + init_completion(&evt->comp); + + return evt; +} + +static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct ibmvfc_event *evt, *found_evt, *temp; + struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs; + unsigned long flags; + int num_hwq, i; + int fail = 0; + LIST_HEAD(cancelq); + u16 status; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + num_hwq = vhost->scsi_scrqs.active_queues; + for (i = 0; i < num_hwq; i++) { + spin_lock(queues[i].q_lock); + spin_lock(&queues[i].l_lock); + found_evt = NULL; + list_for_each_entry(evt, &queues[i].sent, queue_list) { + if (evt->cmnd && evt->cmnd->device == sdev) { + found_evt = evt; + break; + } + } + spin_unlock(&queues[i].l_lock); + + if (found_evt && vhost->logged_in) { + evt = ibmvfc_init_tmf(&queues[i], sdev, type); + if (!evt) { + spin_unlock(queues[i].q_lock); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } + evt->sync_iu = &queues[i].cancel_rsp; + ibmvfc_send_event(evt, vhost, default_timeout); + list_add_tail(&evt->cancel, &cancelq); + } + + spin_unlock(queues[i].q_lock); + } + spin_unlock_irqrestore(vhost->host->host_lock, flags); + + if (list_empty(&cancelq)) { + if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) + sdev_printk(KERN_INFO, sdev, "No events found to cancel\n"); + return 0; + } + + sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); + + list_for_each_entry_safe(evt, temp, &cancelq, cancel) { + wait_for_completion(&evt->comp); + status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status); + list_del(&evt->cancel); + ibmvfc_free_event(evt); + + if (status != IBMVFC_MAD_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); + switch (status) { + case IBMVFC_MAD_DRIVER_FAILED: + case IBMVFC_MAD_CRQ_ERROR: + /* Host adapter most likely going through reset, return success so + * the caller will
wait for the command being cancelled to get returned + */ + break; + default: + fail = 1; + break; + } + } + } + + if (fail) + return -EIO; + + sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); + LEAVE; + return 0; +} + +static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); struct ibmvfc_event *evt, *found_evt; union ibmvfc_iu rsp; int rsp_rc = -EBUSY; @@ -2163,14 +2685,16 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) u16 status; ENTER; - spin_lock_irqsave(vhost->host->host_lock, flags); found_evt = NULL; - list_for_each_entry(evt, &vhost->sent, queue) { + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(&vhost->crq.l_lock); + list_for_each_entry(evt, &vhost->crq.sent, queue_list) { if (evt->cmnd && evt->cmnd->device == sdev) { found_evt = evt; break; } } + spin_unlock(&vhost->crq.l_lock); if (!found_evt) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) @@ -2180,27 +2704,8 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) } if (vhost->logged_in) { - evt = ibmvfc_get_event(vhost); - ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); - - tmf = &evt->iu.tmf; - memset(tmf, 0, sizeof(*tmf)); - tmf->common.version = 1; - tmf->common.opcode = IBMVFC_TMF_MAD; - tmf->common.length = sizeof(*tmf); - tmf->scsi_id = rport->port_id; - int_to_scsilun(sdev->lun, &tmf->lun); - if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) - type &= ~IBMVFC_TMF_SUPPRESS_ABTS; - if (vhost->state == IBMVFC_ACTIVE) - tmf->flags = (type | IBMVFC_TMF_LUA_VALID); - else - tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); - tmf->cancel_key = (unsigned long)sdev->hostdata; - tmf->my_cancel_key = (unsigned long)starget->hostdata; - + evt = ibmvfc_init_tmf(&vhost->crq, sdev, type); evt->sync_iu = &rsp; - init_completion(&evt->comp); rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); } @@ -2208,20 +2713,31 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) if (rsp_rc != 0) { sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); - return -EIO; + /* If failure is received, the host adapter is most likely going + through reset, return success so the caller will wait for the command + being cancelled to get returned */ + return 0; } sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); wait_for_completion(&evt->comp); - status = rsp.mad_common.status; + status = be16_to_cpu(rsp.mad_common.status); spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_free_event(evt); spin_unlock_irqrestore(vhost->host->host_lock, flags); if (status != IBMVFC_MAD_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); + switch (status) { + case IBMVFC_MAD_DRIVER_FAILED: + case IBMVFC_MAD_CRQ_ERROR: + /* Host adapter most likely going through reset, return success so + the caller will wait for the command being cancelled to get returned */ + return 0; + default: + return -EIO; + }; } sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); @@ -2229,6 +2745,27 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) } /** + * ibmvfc_cancel_all - Cancel all outstanding commands to the device + * @sdev: scsi device to cancel commands + * @type: type of error recovery being performed + * + * This sends a cancel to the VIOS for the specified device. This does + * NOT send any abort to the actual device.
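
The cancel paths above also show the byte-order rule this patch applies throughout: MAD fields such as mad_common.status arrive from the VIOS in big-endian wire order and must pass through be16_to_cpu() before being compared against host constants like IBMVFC_MAD_SUCCESS. Below is a minimal user-space sketch of the same idea, using explicit byte shifts in place of the kernel's <asm/byteorder.h> helpers; the wire bytes and the MAD_SUCCESS constant are illustrative stand-ins, not the driver's actual encodings.

#include <stdint.h>
#include <stdio.h>

#define MAD_SUCCESS 0x0000 /* illustrative stand-in for IBMVFC_MAD_SUCCESS */

/* Portable analogue of be16_to_cpu(): assemble the value byte by byte. */
static uint16_t be16_to_host(const uint8_t *wire)
{
	return (uint16_t)((wire[0] << 8) | wire[1]);
}

int main(void)
{
	/* Two bytes as they would sit in the DMA'd response buffer. */
	uint8_t wire_status[2] = { 0x00, 0x08 }; /* big-endian 0x0008 */
	uint16_t status = be16_to_host(wire_status);

	/* Reading the buffer as a raw host u16 would yield 0x0800 on a
	 * little-endian CPU; the converted value is 0x0008 everywhere. */
	printf("status=0x%04x -> %s\n", status,
	       status == MAD_SUCCESS ? "success" : "failed, check reason");
	return 0;
}
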
That must be done separately. + * + * Returns: + * 0 on success / other on failure + **/ +static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + + if (vhost->mq_enabled && vhost->using_channels) + return ibmvfc_cancel_all_mq(sdev, type); + else + return ibmvfc_cancel_all_sq(sdev, type); +} + +/** * ibmvfc_match_key - Match function for specified cancel key * @evt: ibmvfc event struct * @key: cancel key to match @@ -2241,7 +2778,7 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key) unsigned long cancel_key = (unsigned long)key; if (evt->crq.format == IBMVFC_CMD_FORMAT && - evt->iu.cmd.cancel_key == cancel_key) + be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key) return 1; return 0; } @@ -2278,19 +2815,22 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) struct ibmvfc_cmd *tmf; struct ibmvfc_event *evt, *found_evt; union ibmvfc_iu rsp_iu; - struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp; + struct ibmvfc_fcp_cmd_iu *iu; + struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd); int rc, rsp_rc = -EBUSY; unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT; int rsp_code = 0; - spin_lock_irqsave(vhost->host->host_lock, flags); found_evt = NULL; - list_for_each_entry(evt, &vhost->sent, queue) { + spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(&vhost->crq.l_lock); + list_for_each_entry(evt, &vhost->crq.sent, queue_list) { if (evt->cmnd && evt->cmnd->device == sdev) { found_evt = evt; break; } } + spin_unlock(&vhost->crq.l_lock); if (!found_evt) { if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL) @@ -2300,23 +2840,23 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) } if (vhost->state == IBMVFC_ACTIVE) { - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_event(&vhost->crq); + if (!evt) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return -ENOMEM; + } ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT); + tmf = ibmvfc_init_vfc_cmd(evt, sdev); + iu = ibmvfc_get_fcp_iu(vhost, tmf); - tmf = &evt->iu.cmd; - memset(tmf, 0, sizeof(*tmf)); - tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp); - tmf->resp.len = sizeof(tmf->rsp); - tmf->frame_type = IBMVFC_SCSI_FCP_TYPE; - tmf->payload_len = sizeof(tmf->iu); - tmf->resp_len = sizeof(tmf->rsp); - tmf->cancel_key = (unsigned long)sdev->hostdata; - tmf->tgt_scsi_id = rport->port_id; - int_to_scsilun(sdev->lun, &tmf->iu.lun); - tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF); - tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET; + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) + tmf->target_wwpn = cpu_to_be64(rport->port_name); + iu->tmf_flags = IBMVFC_ABORT_TASK_SET; + tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); evt->sync_iu = &rsp_iu; + tmf->correlation = cpu_to_be64((u64)evt); + init_completion(&evt->comp); rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); } @@ -2361,7 +2901,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) } if (rsp_iu.cmd.status) - rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd); + rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd); if (rsp_code) { if (fc_rsp->flags & FCP_RSP_LEN_VALID) @@ -2369,8 +2909,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " "flags: %x fcp_rsp: %x, scsi_status: %x\n", - ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error), - rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, + 
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), + be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code, fc_rsp->scsi_status); rsp_rc = -EIO; } else @@ -2462,18 +3002,6 @@ static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) } /** - * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function - * @sdev: scsi device struct - * @data: return code - * - **/ -static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) -{ - unsigned long *rc = data; - *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET); -} - -/** * ibmvfc_eh_target_reset_handler - Reset the target * @cmd: scsi command struct * @@ -2482,22 +3010,38 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { - struct scsi_device *sdev = cmd->device; - struct ibmvfc_host *vhost = shost_priv(sdev->host); - struct scsi_target *starget = scsi_target(sdev); + struct scsi_target *starget = scsi_target(cmd->device); + struct fc_rport *rport = starget_to_rport(starget); + struct Scsi_Host *shost = rport_to_shost(rport); + struct ibmvfc_host *vhost = shost_priv(shost); int block_rc; int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; + bool tgt_reset = false; ENTER; - block_rc = fc_block_scsi_eh(cmd); + block_rc = fc_block_rport(rport); ibmvfc_wait_while_resetting(vhost); if (block_rc != FAST_IO_FAIL) { - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) { + if ((sdev->channel != starget->channel) || + (sdev->id != starget->id)) + continue; + + cancel_rc |= ibmvfc_cancel_all(sdev, + IBMVFC_TMF_TGT_RESET); + if (!tgt_reset) { + reset_rc = ibmvfc_reset_device(sdev, + IBMVFC_TARGET_RESET, "target"); + tgt_reset = true; + } + } } else - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); + starget_for_each_device(starget, &cancel_rc, + ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); @@ -2516,16 +3060,12 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc, block_rc; + int rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); - if (block_rc == FAST_IO_FAIL) - return FAST_IO_FAIL; - return rc ? 
FAILED : SUCCESS; } @@ -2542,7 +3082,9 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) struct ibmvfc_host *vhost = shost_priv(shost); struct fc_rport *dev_rport; struct scsi_device *sdev; - unsigned long rc; + struct ibmvfc_target *tgt; + unsigned long rc, flags; + unsigned int found; ENTER; shost_for_each_device(sdev, shost) { @@ -2556,6 +3098,27 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) if (rc == FAILED) ibmvfc_issue_fc_host_lip(shost); + + spin_lock_irqsave(shost->host_lock, flags); + found = 0; + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->scsi_id == rport->port_id) { + found++; + break; + } + } + + if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { + /* + * If we get here, that means we previously attempted to send + * an implicit logout to the target but it failed, most likely + * due to I/O being pending, so we need to send it again + */ + ibmvfc_del_tgt(tgt); + ibmvfc_reinit_host(vhost); + } + + spin_unlock_irqrestore(shost->host_lock, flags); LEAVE; } @@ -2630,14 +3193,15 @@ static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state) static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { - const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event); + const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); struct ibmvfc_target *tgt; ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx," - " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name, + " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), + be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), ibmvfc_get_link_state(crq->link_state)); - switch (crq->event) { + switch (be64_to_cpu(crq->event)) { case IBMVFC_AE_RESUME: switch (crq->link_state) { case IBMVFC_AE_LS_LINK_DOWN: @@ -2653,7 +3217,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, vhost->delay_init = 1; __ibmvfc_reset_host(vhost); break; - }; + } break; case IBMVFC_AE_LINK_UP: @@ -2680,16 +3244,16 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, list_for_each_entry(tgt, &vhost->targets, queue) { if (!crq->scsi_id && !crq->wwpn && !crq->node_name) break; - if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) + if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) continue; - if (crq->wwpn && tgt->ids.port_name != crq->wwpn) + if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) continue; - if (crq->node_name && tgt->ids.node_name != crq->node_name) + if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) continue; - if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO) + if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) tgt->logo_rcvd = 1; - if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) { - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { + ibmvfc_del_tgt(tgt); ibmvfc_reinit_host(vhost); } } @@ -2707,19 +3271,21 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, default: dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); break; - }; + } } /** * ibmvfc_handle_crq - Handles and frees received events in the CRQ * @crq: Command/Response queue * @vhost: ibmvfc host struct + * @evt_doneq: Event done queue * - **/ -static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) +**/ +static void ibmvfc_handle_crq(struct 
ibmvfc_crq *crq, struct ibmvfc_host *vhost, + struct list_head *evt_doneq) { long rc; - struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba; + struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); switch (crq->valid) { case IBMVFC_CRQ_INIT_RSP: @@ -2747,16 +3313,21 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ - dev_info(vhost->dev, "Re-enabling adapter\n"); + dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; + + scsi_block_requests(vhost->host); ibmvfc_purge_requests(vhost, DID_REQUEUE); - ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); - } else { - dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); + wake_up(&vhost->work_wait_q); + } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { + dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); + } else { + dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); } return; case IBMVFC_CRQ_CMD_RSP: @@ -2773,22 +3344,21 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) * things we send. Make sure this response is to something we * actually sent */ - if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) { + if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) { dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", crq->ioba); return; } - if (unlikely(atomic_read(&evt->free))) { + if (unlikely(atomic_dec_if_positive(&evt->active))) { dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", crq->ioba); return; } - del_timer(&evt->timer); - list_del(&evt->queue); - ibmvfc_trc_end(evt); - evt->done(evt); + spin_lock(&evt->queue->l_lock); + list_move_tail(&evt->queue_list, evt_doneq); + spin_unlock(&evt->queue->l_lock); } /** @@ -2806,20 +3376,24 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) int done = 0; spin_lock_irqsave(shost->host_lock, flags); - if (time >= (init_timeout * HZ)) { + if (!vhost->scan_timeout) + done = 1; + else if (time >= (vhost->scan_timeout * HZ)) { dev_info(vhost->dev, "Scan taking longer than %d seconds, " - "continuing initialization\n", init_timeout); + "continuing initialization\n", vhost->scan_timeout); done = 1; } - if (vhost->scan_complete) + if (vhost->scan_complete) { + vhost->scan_timeout = init_timeout; done = 1; + } spin_unlock_irqrestore(shost->host_lock, flags); return done; } /** - * ibmvfc_slave_alloc - Setup the device's task set value + * ibmvfc_sdev_init - Setup the device's task set value * @sdev: struct scsi_device device to configure * * Set the device's task set value so that error handling works as @@ -2828,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) * Returns: * 0 on success / -ENXIO if device does not exist **/ -static int ibmvfc_slave_alloc(struct scsi_device *sdev) +static int ibmvfc_sdev_init(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct fc_rport *rport = 
starget_to_rport(scsi_target(sdev)); @@ -2867,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget) } /** - * ibmvfc_slave_configure - Configure the device + * ibmvfc_sdev_configure - Configure the device * @sdev: struct scsi_device device to configure + * @lim: Request queue limits * * Enable allow_restart for a device if it is a disk. Adjust the * queue_depth here also. @@ -2876,20 +3451,17 @@ static int ibmvfc_target_alloc(struct scsi_target *starget) * Returns: * 0 **/ -static int ibmvfc_slave_configure(struct scsi_device *sdev) +static int ibmvfc_sdev_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct Scsi_Host *shost = sdev->host; unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); - if (sdev->type == TYPE_DISK) + if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; - - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); - scsi_activate_tcq(sdev, sdev->queue_depth); - } else - scsi_deactivate_tcq(sdev, sdev->queue_depth); + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); + } spin_unlock_irqrestore(shost->host_lock, flags); return 0; } @@ -2898,45 +3470,16 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) * ibmvfc_change_queue_depth - Change the device's queue depth * @sdev: scsi device struct * @qdepth: depth to set - * @reason: calling context * * Return value: * actual depth set **/ -static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason) +static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) qdepth = IBMVFC_MAX_CMDS_PER_LUN; - scsi_adjust_queue_depth(sdev, 0, qdepth); - return sdev->queue_depth; -} - -/** - * ibmvfc_change_queue_type - Change the device's queue type - * @sdev: scsi device struct - * @tag_type: type of tags to use - * - * Return value: - * actual queue type set - **/ -static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; + return scsi_change_queue_depth(sdev, qdepth); } static ssize_t ibmvfc_show_host_partition_name(struct device *dev, @@ -2945,8 +3488,7 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%s\n", - vhost->login_buf->resp.partition_name); + return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.partition_name); } static ssize_t ibmvfc_show_host_device_name(struct device *dev, @@ -2955,8 +3497,7 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%s\n", - vhost->login_buf->resp.device_name); + return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.device_name); } static ssize_t ibmvfc_show_host_loc_code(struct device *dev, @@ -2965,8 +3506,7 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%s\n", - vhost->login_buf->resp.port_loc_code); + return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.port_loc_code); } static 
ssize_t ibmvfc_show_host_drc_name(struct device *dev, @@ -2975,8 +3515,7 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%s\n", - vhost->login_buf->resp.drc_name); + return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.drc_name); } static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, @@ -2984,7 +3523,8 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); + return sysfs_emit(buf, "%d\n", + be32_to_cpu(vhost->login_buf->resp.version)); } static ssize_t ibmvfc_show_host_capabilities(struct device *dev, @@ -2992,12 +3532,14 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev, { struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities); + return sysfs_emit(buf, "%llx\n", + be64_to_cpu(vhost->login_buf->resp.capabilities)); } /** * ibmvfc_show_log_level - Show the adapter's error logging level * @dev: class device struct + * @attr: unused * @buf: buffer * * Return value: @@ -3012,7 +3554,7 @@ static ssize_t ibmvfc_show_log_level(struct device *dev, int len; spin_lock_irqsave(shost->host_lock, flags); - len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level); + len = sysfs_emit(buf, "%d\n", vhost->log_level); spin_unlock_irqrestore(shost->host_lock, flags); return len; } @@ -3020,7 +3562,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev, /** * ibmvfc_store_log_level - Change the adapter's error logging level * @dev: class device struct + * @attr: unused * @buf: buffer + * @count: buffer size * * Return value: * number of bytes printed to buffer @@ -3039,6 +3583,39 @@ static ssize_t ibmvfc_store_log_level(struct device *dev, return strlen(buf); } +static ssize_t ibmvfc_show_scsi_channels(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + struct ibmvfc_channels *scsi = &vhost->scsi_scrqs; + unsigned long flags = 0; + int len; + + spin_lock_irqsave(shost->host_lock, flags); + len = sysfs_emit(buf, "%d\n", scsi->desired_queues); + spin_unlock_irqrestore(shost->host_lock, flags); + return len; +} + +static ssize_t ibmvfc_store_scsi_channels(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + struct ibmvfc_channels *scsi = &vhost->scsi_scrqs; + unsigned long flags = 0; + unsigned int channels; + + spin_lock_irqsave(shost->host_lock, flags); + channels = simple_strtoul(buf, NULL, 10); + scsi->desired_queues = min(channels, shost->nr_hw_queues); + ibmvfc_hard_reset_host(vhost); + spin_unlock_irqrestore(shost->host_lock, flags); + return strlen(buf); +} + static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); @@ -3047,6 +3624,8 @@ static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); static 
DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, ibmvfc_show_log_level, ibmvfc_store_log_level); +static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR, + ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels); #ifdef CONFIG_SCSI_IBMVFC_TRACE /** @@ -3062,10 +3641,10 @@ static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, * number of bytes printed to buffer **/ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct Scsi_Host *shost = class_to_shost(dev); struct ibmvfc_host *vhost = shost_priv(shost); unsigned long flags = 0; @@ -3085,7 +3664,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj, return count; } -static struct bin_attribute ibmvfc_trace_attr = { +static const struct bin_attribute ibmvfc_trace_attr = { .attr = { .name = "trace", .mode = S_IRUGO, @@ -3095,39 +3674,42 @@ static struct bin_attribute ibmvfc_trace_attr = { }; #endif -static struct device_attribute *ibmvfc_attrs[] = { - &dev_attr_partition_name, - &dev_attr_device_name, - &dev_attr_port_loc_code, - &dev_attr_drc_name, - &dev_attr_npiv_version, - &dev_attr_capabilities, - &dev_attr_log_level, +static struct attribute *ibmvfc_host_attrs[] = { + &dev_attr_partition_name.attr, + &dev_attr_device_name.attr, + &dev_attr_port_loc_code.attr, + &dev_attr_drc_name.attr, + &dev_attr_npiv_version.attr, + &dev_attr_capabilities.attr, + &dev_attr_log_level.attr, + &dev_attr_nr_scsi_channels.attr, NULL }; -static struct scsi_host_template driver_template = { +ATTRIBUTE_GROUPS(ibmvfc_host); + +static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "IBM POWER Virtual FC Adapter", .proc_name = IBMVFC_NAME, .queuecommand = ibmvfc_queuecommand, + .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = ibmvfc_eh_abort_handler, .eh_device_reset_handler = ibmvfc_eh_device_reset_handler, .eh_target_reset_handler = ibmvfc_eh_target_reset_handler, .eh_host_reset_handler = ibmvfc_eh_host_reset_handler, - .slave_alloc = ibmvfc_slave_alloc, - .slave_configure = ibmvfc_slave_configure, + .sdev_init = ibmvfc_sdev_init, + .sdev_configure = ibmvfc_sdev_configure, .target_alloc = ibmvfc_target_alloc, .scan_finished = ibmvfc_scan_finished, .change_queue_depth = ibmvfc_change_queue_depth, - .change_queue_type = ibmvfc_change_queue_type, .cmd_per_lun = 16, .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = IBMVFC_MAX_SECTORS, - .use_clustering = ENABLE_CLUSTERING, - .shost_attrs = ibmvfc_attrs, + .shost_groups = ibmvfc_host_groups, + .track_queue_depth = 1, }; /** @@ -3139,10 +3721,10 @@ static struct scsi_host_template driver_template = { **/ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost) { - struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq; + struct ibmvfc_queue *async_crq = &vhost->async_crq; struct ibmvfc_async_crq *crq; - crq = &async_crq->msgs[async_crq->cur]; + crq = &async_crq->msgs.async[async_crq->cur]; if (crq->valid & 0x80) { if (++async_crq->cur == async_crq->size) async_crq->cur = 0; @@ -3162,10 +3744,10 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost) **/ static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost) { - struct ibmvfc_crq_queue *queue = &vhost->crq; + struct ibmvfc_queue *queue = &vhost->crq; struct 
ibmvfc_crq *crq; - crq = &queue->msgs[queue->cur]; + crq = &queue->msgs.crq[queue->cur]; if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; @@ -3209,10 +3791,13 @@ static void ibmvfc_tasklet(void *data) struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_crq *crq; struct ibmvfc_async_crq *async; + struct ibmvfc_event *evt, *temp; unsigned long flags; int done = 0; + LIST_HEAD(evt_doneq); spin_lock_irqsave(vhost->host->host_lock, flags); + spin_lock(vhost->crq.q_lock); while (!done) { /* Pull all the valid messages off the async CRQ */ while ((async = ibmvfc_next_async_crq(vhost)) != NULL) { @@ -3223,7 +3808,7 @@ static void ibmvfc_tasklet(void *data) /* Pull all the valid messages off the CRQ */ while ((crq = ibmvfc_next_crq(vhost)) != NULL) { - ibmvfc_handle_crq(crq, vhost); + ibmvfc_handle_crq(crq, vhost, &evt_doneq); crq->valid = 0; wmb(); } @@ -3236,14 +3821,138 @@ static void ibmvfc_tasklet(void *data) wmb(); } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { vio_disable_interrupts(vdev); - ibmvfc_handle_crq(crq, vhost); + ibmvfc_handle_crq(crq, vhost, &evt_doneq); crq->valid = 0; wmb(); } else done = 1; } + spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); + + list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { + timer_delete(&evt->timer); + list_del(&evt->queue_list); + ibmvfc_trc_end(evt); + evt->done(evt); + } +} + +static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable) +{ + struct device *dev = scrq->vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + unsigned long rc; + int irq_action = H_ENABLE_VIO_INTERRUPT; + + if (!enable) + irq_action = H_DISABLE_VIO_INTERRUPT; + + rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action, + scrq->hw_irq, 0, 0); + + if (rc) + dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n", + enable ? "enable" : "disable", scrq->hwq_id, rc); + + return rc; +} + +static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, + struct list_head *evt_doneq) +{ + struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); + + switch (crq->valid) { + case IBMVFC_CRQ_CMD_RSP: + break; + case IBMVFC_CRQ_XPORT_EVENT: + return; + default: + dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); + return; + } + + /* The only kind of payload CRQs we should get are responses to + * things we send.
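
ibmvfc_next_crq() and ibmvfc_next_scrq() above both consume a circular queue the same way: test a firmware-written valid flag (0x80), advance a cursor with wraparound, and issue rmb() so the payload is never read ahead of the flag. The following compilable stand-alone sketch shows that consumer pattern; the slot and ring types are simplified stand-ins for the driver's struct ibmvfc_queue, and a C11 acquire fence plays the role of rmb().

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define SLOT_VALID 0x80
#define QUEUE_SIZE 16

struct slot {
	uint8_t valid;    /* producer sets SLOT_VALID when payload is ready */
	uint64_t payload; /* correlation token in the real CRQ */
};

struct ring {
	struct slot msgs[QUEUE_SIZE];
	size_t cur; /* consumer cursor */
};

/* Analogue of ibmvfc_next_scrq(): return the next valid slot or NULL. */
static struct slot *next_valid(struct ring *q)
{
	struct slot *s = &q->msgs[q->cur];

	if (s->valid & SLOT_VALID) {
		if (++q->cur == QUEUE_SIZE)
			q->cur = 0;
		/* rmb() in the driver: don't read payload before valid */
		atomic_thread_fence(memory_order_acquire);
		return s;
	}
	return NULL;
}

int main(void)
{
	struct ring q = { .cur = 0 };
	struct slot *s;

	q.msgs[0] = (struct slot){ .valid = SLOT_VALID, .payload = 42 };

	/* Drain loop, mirroring the tasklet: consume, then clear valid. */
	while ((s = next_valid(&q)) != NULL)
		s->valid = 0;
	return 0;
}
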
Make sure this response is to something we + * actually sent + */ + if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) { + dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n", + crq->ioba); + return; + } + + if (unlikely(atomic_dec_if_positive(&evt->active))) { + dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n", + crq->ioba); + return; + } + + spin_lock(&evt->queue->l_lock); + list_move_tail(&evt->queue_list, evt_doneq); + spin_unlock(&evt->queue->l_lock); +} + +static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq) +{ + struct ibmvfc_crq *crq; + + crq = &scrq->msgs.scrq[scrq->cur].crq; + if (crq->valid & 0x80) { + if (++scrq->cur == scrq->size) + scrq->cur = 0; + rmb(); + } else + crq = NULL; + + return crq; +} + +static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq) +{ + struct ibmvfc_crq *crq; + struct ibmvfc_event *evt, *temp; + unsigned long flags; + int done = 0; + LIST_HEAD(evt_doneq); + + spin_lock_irqsave(scrq->q_lock, flags); + while (!done) { + while ((crq = ibmvfc_next_scrq(scrq)) != NULL) { + ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); + crq->valid = 0; + wmb(); + } + + ibmvfc_toggle_scrq_irq(scrq, 1); + if ((crq = ibmvfc_next_scrq(scrq)) != NULL) { + ibmvfc_toggle_scrq_irq(scrq, 0); + ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); + crq->valid = 0; + wmb(); + } else + done = 1; + } + spin_unlock_irqrestore(scrq->q_lock, flags); + + list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { + timer_delete(&evt->timer); + list_del(&evt->queue_list); + ibmvfc_trc_end(evt); + evt->done(evt); + } +} + +static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance) +{ + struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance; + + ibmvfc_toggle_scrq_irq(scrq, 0); + ibmvfc_drain_sub_crq(scrq); + + return IRQ_HANDLED; } /** @@ -3255,8 +3964,8 @@ static void ibmvfc_tasklet(void *data) static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT); - tgt->job_step = job_step; + if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT)) + tgt->job_step = job_step; wake_up(&tgt->vhost->work_wait_q); } @@ -3272,7 +3981,7 @@ static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); wake_up(&tgt->vhost->work_wait_q); return 0; } else @@ -3325,7 +4034,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; struct ibmvfc_prli_svc_parms *parms = &rsp->parms; - u32 status = rsp->common.status; + u32 status = be16_to_cpu(rsp->common.status); int index, level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; @@ -3336,24 +4045,24 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) parms->type, parms->flags, parms->service_parms); if (parms->type == IBMVFC_SCSI_FCP_TYPE) { - index = ibmvfc_get_prli_rsp(parms->flags); + index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags)); if (prli_rsp[index].logged_in) { - if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) { + if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) { tgt->need_login = 0; tgt->ids.roles = 0; - if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC) + if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC) tgt->ids.roles |= 
FC_PORT_ROLE_FCP_TARGET; - if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) + if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; tgt->add_rport = 1; } else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); } else if (prli_rsp[index].retry) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); } else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -3362,20 +4071,21 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED) + if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) && + be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else if (tgt->logo_rcvd) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); - else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), - rsp->status, rsp->error, status); + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status); break; - }; + } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); @@ -3397,20 +4107,35 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt) return; kref_get(&tgt->kref); - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } vhost->discovery_threads++; ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; prli = &evt->iu.prli; memset(prli, 0, sizeof(*prli)); - prli->common.version = 1; - prli->common.opcode = IBMVFC_PROCESS_LOGIN; - prli->common.length = sizeof(*prli); - prli->scsi_id = tgt->scsi_id; + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + prli->common.version = cpu_to_be32(2); + prli->target_wwpn = cpu_to_be64(tgt->wwpn); + } else { + prli->common.version = cpu_to_be32(1); + } + prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN); + prli->common.length = cpu_to_be16(sizeof(*prli)); + prli->scsi_id = cpu_to_be64(tgt->scsi_id); prli->parms.type = IBMVFC_SCSI_FCP_TYPE; - prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR; - prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC; + prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR); + prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC); + prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED); + + if (cls3_error) + prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, default_timeout)) { @@ -3431,7 +4156,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; - u32 status = rsp->common.status; + u32 
status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; @@ -3461,17 +4186,18 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), + ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), + ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status); break; - }; + } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); @@ -3494,17 +4220,28 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt) kref_get(&tgt->kref); tgt->logo_rcvd = 0; - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } vhost->discovery_threads++; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; plogi = &evt->iu.plogi; memset(plogi, 0, sizeof(*plogi)); - plogi->common.version = 1; - plogi->common.opcode = IBMVFC_PORT_LOGIN; - plogi->common.length = sizeof(*plogi); - plogi->scsi_id = tgt->scsi_id; + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + plogi->common.version = cpu_to_be32(2); + plogi->target_wwpn = cpu_to_be64(tgt->wwpn); + } else { + plogi->common.version = cpu_to_be32(1); + } + plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN); + plogi->common.length = cpu_to_be16(sizeof(*plogi)); + plogi->scsi_id = cpu_to_be64(tgt->scsi_id); if (ibmvfc_send_event(evt, vhost, default_timeout)) { vhost->discovery_threads--; @@ -3524,7 +4261,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt) struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout; - u32 status = rsp->common.status; + u32 status = be16_to_cpu(rsp->common.status); vhost->discovery_threads--; ibmvfc_free_event(evt); @@ -3542,42 +4279,66 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt) default: tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status); break; - }; + } - if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT) - ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi); - else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS && - tgt->scsi_id != tgt->new_scsi_id) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi); kref_put(&tgt->kref, ibmvfc_release_tgt); wake_up(&vhost->work_wait_q); } /** + * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout + * @tgt: ibmvfc target struct + * @done: Routine to call when the event is responded to + * + * Returns: + * Allocated and initialized ibmvfc_event struct + **/ +static struct ibmvfc_event 
*__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt, + void (*done) (struct ibmvfc_event *)) +{ + struct ibmvfc_implicit_logout *mad; + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + kref_get(&tgt->kref); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) + return NULL; + ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + mad = &evt->iu.implicit_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT); + mad->common.length = cpu_to_be16(sizeof(*mad)); + mad->old_scsi_id = cpu_to_be64(tgt->scsi_id); + return evt; +} + +/** * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target * @tgt: ibmvfc target struct * **/ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) { - struct ibmvfc_implicit_logout *mad; struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; if (vhost->discovery_threads >= disc_threads) return; - kref_get(&tgt->kref); - evt = ibmvfc_get_event(vhost); vhost->discovery_threads++; - ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT); - evt->tgt = tgt; - mad = &evt->iu.implicit_logout; - memset(mad, 0, sizeof(*mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT; - mad->common.length = sizeof(*mad); - mad->old_scsi_id = tgt->scsi_id; + evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt, + ibmvfc_tgt_implicit_logout_done); + if (!evt) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, default_timeout)) { @@ -3589,6 +4350,164 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) } /** + * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; + u32 status = be16_to_cpu(mad->common.status); + + vhost->discovery_threads--; + ibmvfc_free_event(evt); + + /* + * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the + * driver in which case we need to free up all the targets. If we are + * not unloading, we will still go through a hard reset to get out of + * offline state, so there is no need to track the old targets in that + * case. + */ + if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE) + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + else + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT); + + tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? 
"succeeded" : "failed"); + kref_put(&tgt->kref, ibmvfc_release_tgt); + wake_up(&vhost->work_wait_q); +} + +/** + * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_event *evt; + + if (!vhost->logged_in) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + return; + } + + if (vhost->discovery_threads >= disc_threads) + return; + + vhost->discovery_threads++; + evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt, + ibmvfc_tgt_implicit_logout_and_del_done); + + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT); + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Implicit Logout\n"); +} + +/** + * ibmvfc_tgt_move_login_done - Completion handler for Move Login + * @evt: ibmvfc event struct + * + **/ +static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_target *tgt = evt->tgt; + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login; + u32 status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + switch (status) { + case IBMVFC_MAD_SUCCESS: + tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id); + tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name); + tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name); + tgt->scsi_id = tgt->new_scsi_id; + tgt->ids.port_id = tgt->scsi_id; + memcpy(&tgt->service_parms, &rsp->service_parms, + sizeof(tgt->service_parms)); + memcpy(&tgt->service_parms_change, &rsp->service_parms_change, + sizeof(tgt->service_parms_change)); + ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli); + break; + case IBMVFC_MAD_DRIVER_FAILED: + break; + case IBMVFC_MAD_CRQ_ERROR: + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login); + break; + case IBMVFC_MAD_FAILED: + default: + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login); + + tgt_log(tgt, level, + "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n", + tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags), + status); + break; + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + ibmvfc_free_event(evt); + wake_up(&vhost->work_wait_q); +} + + +/** + * ibmvfc_tgt_move_login - Initiate a move login for specified target + * @tgt: ibmvfc target struct + * + **/ +static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt) +{ + struct ibmvfc_host *vhost = tgt->vhost; + struct ibmvfc_move_login *move; + struct ibmvfc_event *evt; + + if (vhost->discovery_threads >= disc_threads) + return; + + kref_get(&tgt->kref); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } + vhost->discovery_threads++; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); + ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT); + evt->tgt = tgt; + move = &evt->iu.move_login; + memset(move, 0, sizeof(*move)); + move->common.version = cpu_to_be32(1); + move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN); + move->common.length = 
cpu_to_be16(sizeof(*move)); + + move->old_scsi_id = cpu_to_be64(tgt->scsi_id); + move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id); + move->wwpn = cpu_to_be64(tgt->wwpn); + move->node_name = cpu_to_be64(tgt->ids.node_name); + + if (ibmvfc_send_event(evt, vhost, default_timeout)) { + vhost->discovery_threads--; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + kref_put(&tgt->kref, ibmvfc_release_tgt); + } else + tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id); +} + +/** * ibmvfc_adisc_needs_plogi - Does device need PLOGI? * @mad: ibmvfc passthru mad struct * @tgt: ibmvfc target struct @@ -3599,13 +4518,11 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt) static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad, struct ibmvfc_target *tgt) { - if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name, - sizeof(tgt->ids.port_name))) + if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name) return 1; - if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name, - sizeof(tgt->ids.node_name))) + if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name) return 1; - if (mad->fc_iu.response[6] != tgt->scsi_id) + if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id) return 1; return 0; } @@ -3620,33 +4537,33 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru; - u32 status = mad->common.status; + u32 status = be16_to_cpu(mad->common.status); u8 fc_reason, fc_explain; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); - del_timer(&tgt->timer); + timer_delete(&tgt->timer); switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "ADISC succeeded\n"); if (ibmvfc_adisc_needs_plogi(mad, tgt)) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_FAILED: default: - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; - fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; + ibmvfc_del_tgt(tgt); + fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16; + fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error), - mad->iu.status, mad->iu.error, + ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), + be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error), ibmvfc_get_fc_type(fc_reason), fc_reason, ibmvfc_get_ls_explain(fc_explain), fc_explain, status); break; - }; + } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); @@ -3663,22 +4580,22 @@ static void ibmvfc_init_passthru(struct ibmvfc_event *evt) struct ibmvfc_passthru_mad *mad = &evt->iu.passthru; memset(mad, 0, sizeof(*mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_PASSTHRU; - mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu); - mad->cmd_ioba.va = (u64)evt->crq.ioba + - offsetof(struct ibmvfc_passthru_mad, iu); - mad->cmd_ioba.len = sizeof(mad->iu); - mad->iu.cmd_len = sizeof(mad->fc_iu.payload); - mad->iu.rsp_len = sizeof(mad->fc_iu.response); - mad->iu.cmd.va = (u64)evt->crq.ioba + + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU); + mad->common.length = 
cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu)); + mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + + offsetof(struct ibmvfc_passthru_mad, iu)); + mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu)); + mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload)); + mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response)); + mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_passthru_mad, fc_iu) + - offsetof(struct ibmvfc_passthru_fc_iu, payload); - mad->iu.cmd.len = sizeof(mad->fc_iu.payload); - mad->iu.rsp.va = (u64)evt->crq.ioba + + offsetof(struct ibmvfc_passthru_fc_iu, payload)); + mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload)); + mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_passthru_mad, fc_iu) + - offsetof(struct ibmvfc_passthru_fc_iu, response); - mad->iu.rsp.len = sizeof(mad->fc_iu.response); + offsetof(struct ibmvfc_passthru_fc_iu, response)); + mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response)); } /** @@ -3705,14 +4622,15 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt) /** * ibmvfc_adisc_timeout - Handle an ADISC timeout - * @tgt: ibmvfc target struct + * @t: ibmvfc target struct * * If an ADISC times out, send a cancel. If the cancel times * out, reset the CRQ. When the ADISC comes back as cancelled, * log back into the target. **/ -static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt) +static void ibmvfc_adisc_timeout(struct timer_list *t) { + struct ibmvfc_target *tgt = timer_container_of(tgt, t, timer); struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; @@ -3731,17 +4649,30 @@ static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt) vhost->abort_threads++; kref_get(&tgt->kref); - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + tgt_err(tgt, "Failed to get cancel event for ADISC.\n"); + vhost->abort_threads--; + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; + } ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; tmf = &evt->iu.tmf; memset(tmf, 0, sizeof(*tmf)); - tmf->common.version = 1; - tmf->common.opcode = IBMVFC_TMF_MAD; - tmf->common.length = sizeof(*tmf); - tmf->scsi_id = tgt->scsi_id; - tmf->cancel_key = tgt->cancel_key; + if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) { + tmf->common.version = cpu_to_be32(2); + tmf->target_wwpn = cpu_to_be64(tgt->wwpn); + } else { + tmf->common.version = cpu_to_be32(1); + } + tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD); + tmf->common.length = cpu_to_be16(sizeof(*tmf)); + tmf->scsi_id = cpu_to_be64(tgt->scsi_id); + tmf->cancel_key = cpu_to_be32(tgt->cancel_key); rc = ibmvfc_send_event(evt, vhost, default_timeout); @@ -3776,37 +4707,41 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) return; kref_get(&tgt->kref); - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } vhost->discovery_threads++; ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT); evt->tgt = tgt; ibmvfc_init_passthru(evt); mad = &evt->iu.passthru; - mad->iu.flags = IBMVFC_FC_ELS; - mad->iu.scsi_id = tgt->scsi_id; - mad->iu.cancel_key = tgt->cancel_key; + mad->iu.flags = 
cpu_to_be32(IBMVFC_FC_ELS); + mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id); + mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key); - mad->fc_iu.payload[0] = IBMVFC_ADISC; + mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC); memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name, sizeof(vhost->login_buf->resp.port_name)); memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name, sizeof(vhost->login_buf->resp.node_name)); - mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff; + mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff); if (timer_pending(&tgt->timer)) mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); else { - tgt->timer.data = (unsigned long) tgt; tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ); - tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout; add_timer(&tgt->timer); } ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) { vhost->discovery_threads--; - del_timer(&tgt->timer); + timer_delete(&tgt->timer); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); kref_put(&tgt->kref, ibmvfc_release_tgt); } else @@ -3823,7 +4758,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) struct ibmvfc_target *tgt = evt->tgt; struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; - u32 status = rsp->common.status; + u32 status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; @@ -3831,9 +4766,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) switch (status) { case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "Query Target succeeded\n"); - tgt->new_scsi_id = rsp->scsi_id; - if (rsp->scsi_id != tgt->scsi_id) - ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id) + ibmvfc_del_tgt(tgt); else ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc); break; @@ -3844,21 +4778,23 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && - rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && - rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && + be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ && + be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG) + ibmvfc_del_tgt(tgt); + else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + ibmvfc_del_tgt(tgt); tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), + ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type), + ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), + status); break; - }; + } kref_put(&tgt->kref, ibmvfc_release_tgt); ibmvfc_free_event(evt); @@ -3880,16 
+4816,22 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt) return; kref_get(&tgt->kref); - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + kref_put(&tgt->kref, ibmvfc_release_tgt); + __ibmvfc_reset_host(vhost); + return; + } vhost->discovery_threads++; evt->tgt = tgt; ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT); query_tgt = &evt->iu.query_tgt; memset(query_tgt, 0, sizeof(*query_tgt)); - query_tgt->common.version = 1; - query_tgt->common.opcode = IBMVFC_QUERY_TARGET; - query_tgt->common.length = sizeof(*query_tgt); - query_tgt->wwpn = tgt->ids.port_name; + query_tgt->common.version = cpu_to_be32(1); + query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET); + query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt)); + query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name); ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); if (ibmvfc_send_event(evt, vhost, default_timeout)) { @@ -3903,43 +4845,87 @@ /** * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target * @vhost: ibmvfc host struct - * @scsi_id: SCSI ID to allocate target for + * @target: Holds the SCSI ID to allocate the target for, and the WWPN * * Returns: * 0 on success / other on failure **/ -static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) +static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, + struct ibmvfc_discover_targets_entry *target) { + struct ibmvfc_target *stgt = NULL; + struct ibmvfc_target *wtgt = NULL; struct ibmvfc_target *tgt; unsigned long flags; + u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK; + u64 wwpn = be64_to_cpu(target->wwpn); + /* Look to see if we already have a target allocated for this SCSI ID or WWPN */ spin_lock_irqsave(vhost->host->host_lock, flags); list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->wwpn == wwpn) { + wtgt = tgt; + break; + } + } + + list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->scsi_id == scsi_id) { - if (tgt->need_login) - ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + stgt = tgt; + break; + } + } + + if (wtgt && !stgt) { + /* + * A WWPN target has moved and we are still tracking the old + * SCSI ID. The only way we should be able to get here is if + * we attempted to send an implicit logout for the old SCSI ID + * and it failed for some reason, such as there being I/O + * pending to the target. In this case, we will have already + * deleted the rport from the FC transport so we do a move + * login, which works even with I/O pending. However, if + * there is still I/O pending, it will stay outstanding, so + * we only do this if fast fail is disabled for the rport, + * otherwise we let terminate_rport_io clean up the port + * before we login at the new location. + */ + if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) { + if (wtgt->move_login) { + /* + * Do a move login here. The old target is no longer + * known to the transport layer. We don't use the + * normal ibmvfc_set_tgt_action to set this, as we + * don't normally want to allow this state change.
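+ * As a rough sketch of the resulting flow (inferred from the hunks below rather than stated in the patch): IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT -> record new_scsi_id -> IBMVFC_TGT_ACTION_INIT -> ibmvfc_tgt_move_login(), whose Move Login MAD carries old_scsi_id and new_scsi_id to the VIOS.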
+ */ + wtgt->new_scsi_id = scsi_id; + wtgt->action = IBMVFC_TGT_ACTION_INIT; + wtgt->init_retries = 0; + ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login); + } goto unlock_out; + } else { + tgt_err(wtgt, "Unexpected target state: %d, %p\n", + wtgt->action, wtgt->rport); } + } else if (stgt) { + if (tgt->need_login) + ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); + goto unlock_out; } spin_unlock_irqrestore(vhost->host->host_lock, flags); tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); - if (!tgt) { - dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", - scsi_id); - return -ENOMEM; - } - memset(tgt, 0, sizeof(*tgt)); tgt->scsi_id = scsi_id; - tgt->new_scsi_id = scsi_id; + tgt->wwpn = wwpn; tgt->vhost = vhost; tgt->need_login = 1; - tgt->cancel_key = vhost->task_set++; - init_timer(&tgt->timer); + timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0); kref_init(&tgt->kref); ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); spin_lock_irqsave(vhost->host->host_lock, flags); + tgt->cancel_key = vhost->task_set++; list_add_tail(&tgt->queue, &vhost->targets); unlock_out: @@ -3959,8 +4945,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost) int i, rc; for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++) - rc = ibmvfc_alloc_target(vhost, - vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK); + rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]); return rc; } @@ -3974,19 +4959,20 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; - u32 mad_status = rsp->common.status; + u32 mad_status = be16_to_cpu(rsp->common.status); int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_dbg(vhost, "Discover Targets succeeded\n"); - vhost->num_targets = rsp->num_written; + vhost->num_targets = be32_to_cpu(rsp->num_written); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); break; case IBMVFC_MAD_FAILED: level += ibmvfc_retry_host_init(vhost); ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -4008,17 +4994,25 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) { struct ibmvfc_discover_targets *mad; - struct ibmvfc_event *evt = ibmvfc_get_event(vhost); + struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + if (!evt) { + ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT); mad = &evt->iu.discover_targets; memset(mad, 0, sizeof(*mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_DISC_TARGETS; - mad->common.length = sizeof(*mad); - mad->bufflen = vhost->disc_buf_sz; - mad->buffer.va = vhost->disc_buf_dma; - mad->buffer.len = vhost->disc_buf_sz; + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS); + mad->common.length = cpu_to_be16(sizeof(*mad)); + mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz); + mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma); + mad->buffer.len = 
cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz); + mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); if (!ibmvfc_send_event(evt, vhost, default_timeout)) @@ -4027,6 +5021,162 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); } +static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf; + struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs; + u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + int flags, active_queues, i; + + ibmvfc_free_event(evt); + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_dbg(vhost, "Channel Setup succeeded\n"); + flags = be32_to_cpu(setup->flags); + vhost->do_enquiry = 0; + active_queues = be32_to_cpu(setup->num_scsi_subq_channels); + scrqs->active_queues = active_queues; + + if (flags & IBMVFC_CHANNELS_CANCELED) { + ibmvfc_dbg(vhost, "Channels Canceled\n"); + vhost->using_channels = 0; + } else { + if (active_queues) + vhost->using_channels = 1; + for (i = 0; i < active_queues; i++) + scrqs->scrqs[i].vios_cookie = + be64_to_cpu(setup->channel_handles[i]); + + ibmvfc_dbg(vhost, "Using %u channels\n", + vhost->scsi_scrqs.active_queues); + } + break; + case IBMVFC_MAD_FAILED: + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Channel Setup failed\n"); + fallthrough; + case IBMVFC_MAD_DRIVER_FAILED: + return; + default: + dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n", + mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + return; + } + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + wake_up(&vhost->work_wait_q); +} + +static void ibmvfc_channel_setup(struct ibmvfc_host *vhost) +{ + struct ibmvfc_channel_setup_mad *mad; + struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf; + struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq); + struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs; + unsigned int num_channels = + min(scrqs->desired_queues, vhost->max_vios_scsi_channels); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + int i; + + if (!evt) { + ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + memset(setup_buf, 0, sizeof(*setup_buf)); + if (num_channels == 0) + setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS); + else { + setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels); + for (i = 0; i < num_channels; i++) + setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie); + } + + ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT); + mad = &evt->iu.channel_setup; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP); + mad->common.length = cpu_to_be16(sizeof(*mad)); + mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma); + mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf)); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent channel setup\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); +} + +static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry; + u32 
mad_status = be16_to_cpu(rsp->common.status); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n"); + vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels); + ibmvfc_free_event(evt); + break; + case IBMVFC_MAD_FAILED: + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Channel Enquiry failed\n"); + fallthrough; + case IBMVFC_MAD_DRIVER_FAILED: + ibmvfc_free_event(evt); + return; + default: + dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n", + mad_status); + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_free_event(evt); + return; + } + + ibmvfc_channel_setup(vhost); +} + +static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost) +{ + struct ibmvfc_channel_enquiry *mad; + struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq); + int level = IBMVFC_DEFAULT_LOG_LEVEL; + + if (!evt) { + ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + + ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT); + mad = &evt->iu.channel_enquiry; + memset(mad, 0, sizeof(*mad)); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY); + mad->common.length = cpu_to_be16(sizeof(*mad)); + + if (mig_channels_only) + mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT); + if (mig_no_less_channels) + mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Send channel enquiry\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + /** * ibmvfc_npiv_login_done - Completion handler for NPIV Login * @evt: ibmvfc event struct @@ -4035,7 +5185,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost) static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; - u32 mad_status = evt->xfer_iu->npiv_login.common.status; + u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status); struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; unsigned int npiv_max_sectors; int level = IBMVFC_DEFAULT_LOG_LEVEL; @@ -4045,16 +5195,18 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error))) level += ibmvfc_retry_host_init(vhost); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); + ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), + be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_host_init(vhost); + fallthrough; case IBMVFC_MAD_DRIVER_FAILED: ibmvfc_free_event(evt); return; @@ -4067,7 +5219,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) vhost->client_migrated = 0; - if (!(rsp->flags & IBMVFC_NATIVE_FC)) { + if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) { dev_err(vhost->dev, "Virtual adapter does not support FC. 
%x\n", rsp->flags); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); @@ -4075,7 +5227,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; } - if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) { + if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) { dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n", rsp->max_cmds); ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); @@ -4084,30 +5236,36 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) } vhost->logged_in = 1; - npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); + npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, rsp->drc_name, npiv_max_sectors); - fc_host_fabric_name(vhost->host) = rsp->node_name; - fc_host_node_name(vhost->host) = rsp->node_name; - fc_host_port_name(vhost->host) = rsp->port_name; - fc_host_port_id(vhost->host) = rsp->scsi_id; + fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name); + fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name); + fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name); + fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id); fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV; fc_host_supported_classes(vhost->host) = 0; - if (rsp->service_parms.class1_parms[0] & 0x80000000) + if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1; - if (rsp->service_parms.class2_parms[0] & 0x80000000) + if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2; - if (rsp->service_parms.class3_parms[0] & 0x80000000) + if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3; fc_host_maxframe_size(vhost->host) = - rsp->service_parms.common.bb_rcv_sz & 0x0fff; + be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff; - vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ; + vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ; vhost->host->max_sectors = npiv_max_sectors; - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); - wake_up(&vhost->work_wait_q); + + if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) { + ibmvfc_channel_enquiry(vhost); + } else { + vhost->do_enquiry = 0; + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + wake_up(&vhost->work_wait_q); + } } /** @@ -4118,7 +5276,13 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) { struct ibmvfc_npiv_login_mad *mad; - struct ibmvfc_event *evt = ibmvfc_get_event(vhost); + struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq); + + if (!evt) { + ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } ibmvfc_gather_partition_info(vhost); ibmvfc_set_login_info(vhost); @@ -4127,11 +5291,11 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info)); mad = &evt->iu.npiv_login; memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_NPIV_LOGIN; - mad->common.length = sizeof(struct ibmvfc_npiv_login_mad); - mad->buffer.va = vhost->login_buf_dma; - mad->buffer.len = 
sizeof(*vhost->login_buf); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN); + mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad)); + mad->buffer.va = cpu_to_be64(vhost->login_buf_dma); + mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf)); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT); @@ -4139,23 +5303,23 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) ibmvfc_dbg(vhost, "Sent NPIV login\n"); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); -}; +} /** * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout - * @vhost: ibmvfc host struct + * @evt: ibmvfc event struct * **/ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) { struct ibmvfc_host *vhost = evt->vhost; - u32 mad_status = evt->xfer_iu->npiv_logout.common.status; + u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status); ibmvfc_free_event(evt); switch (mad_status) { case IBMVFC_MAD_SUCCESS: - if (list_empty(&vhost->sent) && + if (list_empty(&vhost->crq.sent) && vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { ibmvfc_init_host(vhost); return; @@ -4183,14 +5347,20 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) struct ibmvfc_npiv_logout_mad *mad; struct ibmvfc_event *evt; - evt = ibmvfc_get_event(vhost); + evt = ibmvfc_get_reserved_event(&vhost->crq); + if (!evt) { + ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n"); + ibmvfc_hard_reset_host(vhost); + return; + } + ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); mad = &evt->iu.npiv_logout; memset(mad, 0, sizeof(*mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_NPIV_LOGOUT; - mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); + mad->common.version = cpu_to_be32(1); + mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT); + mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad)); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); @@ -4221,6 +5391,25 @@ static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost) } /** + * ibmvfc_dev_logo_to_do - Is there target logout work to do? + * @vhost: ibmvfc host struct + * + * Returns: + * 1 if work to do / 0 if not + **/ +static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost) +{ + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT || + tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT) + return 1; + } + return 0; +} + +/** * __ibmvfc_work_to_do - Is there task level work to do? 
(no locking) * @vhost: ibmvfc host struct * @@ -4249,17 +5438,26 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 0; return 1; + case IBMVFC_HOST_ACTION_TGT_DEL: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + if (vhost->discovery_threads == disc_threads) + return 0; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) + return 1; + list_for_each_entry(tgt, &vhost->targets, queue) + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT) + return 0; + return 1; case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: - case IBMVFC_HOST_ACTION_TGT_DEL: - case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: case IBMVFC_HOST_ACTION_QUERY: case IBMVFC_HOST_ACTION_RESET: case IBMVFC_HOST_ACTION_REENABLE: default: break; - }; + } return 1; } @@ -4321,9 +5519,17 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); spin_unlock_irqrestore(vhost->host->host_lock, flags); fc_remote_port_delete(rport); - del_timer_sync(&tgt->timer); + timer_delete_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; + } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { + tgt_dbg(tgt, "Deleting rport with outstanding I/O\n"); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT); + tgt->rport = NULL; + tgt->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + fc_remote_port_delete(rport); + return; } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) { spin_unlock_irqrestore(vhost->host->host_lock, flags); return; @@ -4332,17 +5538,15 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) if (rport) { tgt_dbg(tgt, "rport add succeeded\n"); tgt->rport = rport; - rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; + rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff; rport->supported_classes = 0; tgt->target_id = rport->scsi_target_id; - if (tgt->service_parms.class1_parms[0] & 0x80000000) + if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS1; - if (tgt->service_parms.class2_parms[0] & 0x80000000) + if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS2; - if (tgt->service_parms.class3_parms[0] & 0x80000000) + if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000) rport->supported_classes |= FC_COS_CLASS3; - if (rport->rqst_q) - blk_queue_max_segments(rport->rqst_q, 1); } else tgt_dbg(tgt, "rport add failed\n"); spin_unlock_irqrestore(vhost->host->host_lock, flags); @@ -4358,6 +5562,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) struct ibmvfc_target *tgt; unsigned long flags; struct fc_rport *rport; + LIST_HEAD(purge); int rc; ibmvfc_log_ae(vhost, vhost->events_to_log); @@ -4369,26 +5574,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) case IBMVFC_HOST_ACTION_INIT_WAIT: break; case IBMVFC_HOST_ACTION_RESET: - vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + list_splice_init(&vhost->purge, &purge); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_complete_purge(&purge); rc = ibmvfc_reset_crq(vhost); + spin_lock_irqsave(vhost->host->host_lock, flags); - if (rc == H_CLOSED) + if (!rc || rc == H_CLOSED) vio_enable_interrupts(to_vio_dev(vhost->dev)); - if (rc || (rc = ibmvfc_send_crq_init(vhost)) || - (rc = 
vio_enable_interrupts(to_vio_dev(vhost->dev)))) { - ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); - dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); + if (vhost->action == IBMVFC_HOST_ACTION_RESET) { + /* + * The only action we could have changed to would have + * been reenable, in which case we skip the rest of + * this path and wait until we've done the re-enable + * before sending the crq init. + */ + vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + + if (rc || (rc = ibmvfc_send_crq_init(vhost)) || + (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); + } } break; case IBMVFC_HOST_ACTION_REENABLE: - vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + list_splice_init(&vhost->purge, &purge); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_complete_purge(&purge); rc = ibmvfc_reenable_crq_queue(vhost); + spin_lock_irqsave(vhost->host->host_lock, flags); - if (rc || (rc = ibmvfc_send_crq_init(vhost))) { - ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); - dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc); + if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) { + /* + * The only action we could have changed to would have + * been reset, in which case we skip the rest of this + * path and wait until we've done the reset before + * sending the crq init. + */ + vhost->action = IBMVFC_HOST_ACTION_TGT_DEL; + if (rc || (rc = ibmvfc_send_crq_init(vhost))) { + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc); + } } break; case IBMVFC_HOST_ACTION_LOGO: @@ -4423,6 +5651,18 @@ case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) { + tgt->job_step(tgt); + break; + } + } + + if (ibmvfc_dev_logo_to_do(vhost)) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return; + } + + list_for_each_entry(tgt, &vhost->targets, queue) { if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { tgt_dbg(tgt, "Deleting rport\n"); rport = tgt->rport; @@ -4432,9 +5672,31 @@ spin_unlock_irqrestore(vhost->host->host_lock, flags); if (rport) fc_remote_port_delete(rport); - del_timer_sync(&tgt->timer); + timer_delete_sync(&tgt->timer); kref_put(&tgt->kref, ibmvfc_release_tgt); return; + } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { + tgt_dbg(tgt, "Deleting rport with I/O outstanding\n"); + rport = tgt->rport; + tgt->rport = NULL; + tgt->init_retries = 0; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT); + + /* + * If fast fail is enabled, we wait for it to fire and then clean up + * the old port, since we expect the fast fail timer to clean up the + * outstanding I/O faster than waiting for normal command timeouts. + * However, if fast fail is disabled, any I/O outstanding to the + * rport LUNs will stay outstanding indefinitely, since the EH handlers + * won't get invoked for I/Os timing out. If this is an NPIV failover + * scenario, the better alternative is to use the move login.
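+ * (Illustration, not part of this patch: "fast fail" here is the FC transport class fast_io_fail_tmo attribute, tunable per rport from userspace, e.g. echo 5 > /sys/class/fc_remote_ports/rport-0:0-1/fast_io_fail_tmo, where the rport name is hypothetical; writing "off" stores -1, which is the disabled case tested below.)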
+ */ + if (rport && rport->fast_io_fail_tmo == -1) + tgt->move_login = 1; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + if (rport) + fc_remote_port_delete(rport); + return; } } @@ -4487,7 +5749,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) break; default: break; - }; + } spin_unlock_irqrestore(vhost->host->host_lock, flags); } @@ -4504,7 +5766,7 @@ static int ibmvfc_work(void *data) struct ibmvfc_host *vhost = data; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(vhost->work_wait_q, @@ -4523,6 +5785,74 @@ static int ibmvfc_work(void *data) } /** + * ibmvfc_alloc_queue - Allocate queue + * @vhost: ibmvfc host struct + * @queue: ibmvfc queue to allocate + * @fmt: queue format to allocate + * + * Returns: + * 0 on success / non-zero on failure + **/ +static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, + struct ibmvfc_queue *queue, + enum ibmvfc_msg_fmt fmt) +{ + struct device *dev = vhost->dev; + size_t fmt_size; + + ENTER; + spin_lock_init(&queue->_lock); + queue->q_lock = &queue->_lock; + + switch (fmt) { + case IBMVFC_CRQ_FMT: + fmt_size = sizeof(*queue->msgs.crq); + queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ; + queue->evt_depth = scsi_qdepth; + queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ; + break; + case IBMVFC_ASYNC_FMT: + fmt_size = sizeof(*queue->msgs.async); + break; + case IBMVFC_SUB_CRQ_FMT: + fmt_size = sizeof(*queue->msgs.scrq); + /* We need one extra event for Cancel Commands */ + queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ; + queue->evt_depth = scsi_qdepth; + queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ; + break; + default: + dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt); + return -EINVAL; + } + + queue->fmt = fmt; + if (ibmvfc_init_event_pool(vhost, queue)) { + dev_err(dev, "Couldn't initialize event pool.\n"); + return -ENOMEM; + } + + queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL); + if (!queue->msgs.handle) + return -ENOMEM; + + queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(dev, queue->msg_token)) { + free_page((unsigned long)queue->msgs.handle); + queue->msgs.handle = NULL; + return -ENOMEM; + } + + queue->cur = 0; + queue->size = PAGE_SIZE / fmt_size; + + queue->vhost = vhost; + return 0; +} + +/** * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor * @vhost: ibmvfc host struct * @@ -4537,21 +5867,12 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost) int rc, retrc = -ENOMEM; struct device *dev = vhost->dev; struct vio_dev *vdev = to_vio_dev(dev); - struct ibmvfc_crq_queue *crq = &vhost->crq; + struct ibmvfc_queue *crq = &vhost->crq; ENTER; - crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL); - - if (!crq->msgs) + if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT)) return -ENOMEM; - crq->size = PAGE_SIZE / sizeof(*crq->msgs); - crq->msg_token = dma_map_single(dev, crq->msgs, - PAGE_SIZE, DMA_BIDIRECTIONAL); - - if (dma_mapping_error(dev, crq->msg_token)) - goto map_failed; - retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, crq->msg_token, PAGE_SIZE); @@ -4580,7 +5901,6 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost) goto req_irq_failed; } - crq->cur = 0; LEAVE; return retrc; @@ -4590,12 +5910,229 @@ req_irq_failed: rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); reg_crq_failed: - dma_unmap_single(dev, 
crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); -map_failed: - free_page((unsigned long)crq->msgs); + ibmvfc_free_queue(vhost, crq); return retrc; } +static int ibmvfc_register_channel(struct ibmvfc_host *vhost, + struct ibmvfc_channels *channels, + int index) +{ + struct device *dev = vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + struct ibmvfc_queue *scrq = &channels->scrqs[index]; + int rc = -ENOMEM; + + ENTER; + + rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, + &scrq->cookie, &scrq->hw_irq); + + /* H_CLOSED indicates successful register, but no CRQ partner */ + if (rc && rc != H_CLOSED) { + dev_warn(dev, "Error registering sub-crq: %d\n", rc); + if (rc == H_PARAMETER) + dev_warn_once(dev, "Firmware may not support MQ\n"); + goto reg_failed; + } + + scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); + + if (!scrq->irq) { + rc = -EINVAL; + dev_err(dev, "Error mapping sub-crq[%d] irq\n", index); + goto irq_failed; + } + + switch (channels->protocol) { + case IBMVFC_PROTO_SCSI: + snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d", + vdev->unit_address, index); + scrq->handler = ibmvfc_interrupt_mq; + break; + case IBMVFC_PROTO_NVME: + snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d", + vdev->unit_address, index); + scrq->handler = ibmvfc_interrupt_mq; + break; + default: + dev_err(dev, "Unknown channel protocol (%d)\n", + channels->protocol); + goto irq_failed; + } + + rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq); + + if (rc) { + dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index); + irq_dispose_mapping(scrq->irq); + goto irq_failed; + } + + scrq->hwq_id = index; + + LEAVE; + return 0; + +irq_failed: + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); +reg_failed: + LEAVE; + return rc; +} + +static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost, + struct ibmvfc_channels *channels, + int index) +{ + struct device *dev = vhost->dev; + struct vio_dev *vdev = to_vio_dev(dev); + struct ibmvfc_queue *scrq = &channels->scrqs[index]; + long rc; + + ENTER; + + free_irq(scrq->irq, scrq); + irq_dispose_mapping(scrq->irq); + scrq->irq = 0; + + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, + scrq->cookie); + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + if (rc) + dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); + + /* Clean out the queue */ + memset(scrq->msgs.crq, 0, PAGE_SIZE); + scrq->cur = 0; + + LEAVE; +} + +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost, + struct ibmvfc_channels *channels) +{ + int i, j; + + ENTER; + if (!vhost->mq_enabled || !channels->scrqs) + return; + + for (i = 0; i < channels->max_queues; i++) { + if (ibmvfc_register_channel(vhost, channels, i)) { + for (j = i; j > 0; j--) + ibmvfc_deregister_channel(vhost, channels, j - 1); + vhost->do_enquiry = 0; + return; + } + } + + LEAVE; +} + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost, + struct ibmvfc_channels *channels) +{ + int i; + + ENTER; + if (!vhost->mq_enabled || !channels->scrqs) + return; + + for (i = 0; i < channels->max_queues; i++) + ibmvfc_deregister_channel(vhost, channels, i); + + LEAVE; +} + +static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost, + struct ibmvfc_channels *channels) +{ + struct ibmvfc_queue *scrq; + int i, j; + int rc = 0; + + channels->scrqs = kcalloc(channels->max_queues, + sizeof(*channels->scrqs), + GFP_KERNEL); + if (!channels->scrqs) + return -ENOMEM; + + for 
(i = 0; i < channels->max_queues; i++) { + scrq = &channels->scrqs[i]; + rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT); + if (rc) { + for (j = i; j > 0; j--) { + scrq = &channels->scrqs[j - 1]; + ibmvfc_free_queue(vhost, scrq); + } + kfree(channels->scrqs); + channels->scrqs = NULL; + channels->active_queues = 0; + return rc; + } + } + + return rc; +} + +static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) +{ + ENTER; + if (!vhost->mq_enabled) + return; + + if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) { + vhost->do_enquiry = 0; + vhost->mq_enabled = 0; + return; + } + + ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs); + + LEAVE; +} + +static void ibmvfc_release_channels(struct ibmvfc_host *vhost, + struct ibmvfc_channels *channels) +{ + struct ibmvfc_queue *scrq; + int i; + + if (channels->scrqs) { + for (i = 0; i < channels->max_queues; i++) { + scrq = &channels->scrqs[i]; + ibmvfc_free_queue(vhost, scrq); + } + + kfree(channels->scrqs); + channels->scrqs = NULL; + channels->active_queues = 0; + } +} + +static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) +{ + ENTER; + if (!vhost->scsi_scrqs.scrqs) + return; + + ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs); + + ibmvfc_release_channels(vhost, &vhost->scsi_scrqs); + LEAVE; +} + +static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels) +{ + dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf, + channels->disc_buf_dma); +} + /** * ibmvfc_free_mem - Free memory for vhost * @vhost: ibmvfc host struct @@ -4605,22 +6142,36 @@ map_failed: **/ static void ibmvfc_free_mem(struct ibmvfc_host *vhost) { - struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq; + struct ibmvfc_queue *async_q = &vhost->async_crq; ENTER; mempool_destroy(vhost->tgt_pool); kfree(vhost->trace); - dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf, - vhost->disc_buf_dma); + ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs); dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf), vhost->login_buf, vhost->login_buf_dma); + dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf), + vhost->channel_setup_buf, vhost->channel_setup_dma); dma_pool_destroy(vhost->sg_pool); - dma_unmap_single(vhost->dev, async_q->msg_token, - async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL); - free_page((unsigned long)async_q->msgs); + ibmvfc_free_queue(vhost, async_q); LEAVE; } +static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels) +{ + channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets; + channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz, + &channels->disc_buf_dma, GFP_KERNEL); + + if (!channels->disc_buf) { + dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n", + (channels->protocol == IBMVFC_PROTO_SCSI) ? 
"SCSI" : "NVMe"); + return -ENOMEM; + } + + return 0; +} + /** * ibmvfc_alloc_mem - Allocate memory for vhost * @vhost: ibmvfc host struct @@ -4630,26 +6181,15 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost) **/ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) { - struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq; + struct ibmvfc_queue *async_q = &vhost->async_crq; struct device *dev = vhost->dev; ENTER; - async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL); - if (!async_q->msgs) { - dev_err(dev, "Couldn't allocate async queue.\n"); + if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) { + dev_err(dev, "Couldn't allocate/map async queue.\n"); goto nomem; } - async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq); - async_q->msg_token = dma_map_single(dev, async_q->msgs, - async_q->size * sizeof(*async_q->msgs), - DMA_BIDIRECTIONAL); - - if (dma_mapping_error(dev, async_q->msg_token)) { - dev_err(dev, "Failed to map async queue\n"); - goto free_async_crq; - } - vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev, SG_ALL * sizeof(struct srp_direct_buf), sizeof(struct srp_direct_buf), 0); @@ -4667,20 +6207,15 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) goto free_sg_pool; } - vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets; - vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz, - &vhost->disc_buf_dma, GFP_KERNEL); - - if (!vhost->disc_buf) { - dev_err(dev, "Couldn't allocate Discover Targets buffer\n"); + if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs)) goto free_login_buffer; - } vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES, sizeof(struct ibmvfc_trace_entry), GFP_KERNEL); + atomic_set(&vhost->trace_index, -1); if (!vhost->trace) - goto free_disc_buffer; + goto free_scsi_disc_buffer; vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ, sizeof(struct ibmvfc_target)); @@ -4690,24 +6225,31 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) goto free_trace; } + vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf), + &vhost->channel_setup_dma, + GFP_KERNEL); + + if (!vhost->channel_setup_buf) { + dev_err(dev, "Couldn't allocate Channel Setup buffer\n"); + goto free_tgt_pool; + } + LEAVE; return 0; +free_tgt_pool: + mempool_destroy(vhost->tgt_pool); free_trace: kfree(vhost->trace); -free_disc_buffer: - dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf, - vhost->disc_buf_dma); +free_scsi_disc_buffer: + ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs); free_login_buffer: dma_free_coherent(dev, sizeof(*vhost->login_buf), vhost->login_buf, vhost->login_buf_dma); free_sg_pool: dma_pool_destroy(vhost->sg_pool); unmap_async_crq: - dma_unmap_single(dev, async_q->msg_token, - async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL); -free_async_crq: - free_page((unsigned long)async_q->msgs); + ibmvfc_free_queue(vhost, async_q); nomem: LEAVE; return -ENOMEM; @@ -4748,6 +6290,8 @@ static void ibmvfc_rport_add_thread(struct work_struct *work) tgt_dbg(tgt, "Setting rport roles\n"); fc_remote_port_rolechg(rport, tgt->ids.roles); put_device(&rport->dev); + } else { + spin_unlock_irqrestore(vhost->host->host_lock, flags); } kref_put(&tgt->kref, ibmvfc_release_tgt); @@ -4777,6 +6321,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) struct Scsi_Host *shost; struct device *dev = &vdev->dev; int rc = -ENOMEM; + unsigned int online_cpus = num_online_cpus(); + unsigned int max_scsi_queues = min((unsigned 
int)IBMVFC_MAX_SCSI_QUEUES, online_cpus); ENTER; shost = scsi_host_alloc(&driver_template, sizeof(*vhost)); @@ -4786,23 +6332,32 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) } shost->transportt = ibmvfc_transport_template; - shost->can_queue = max_requests; + shost->can_queue = scsi_qdepth; shost->max_lun = max_lun; shost->max_id = max_targets; - shost->max_sectors = IBMVFC_MAX_SECTORS; + shost->max_sectors = max_sectors; shost->max_cmd_len = IBMVFC_MAX_CDB_LEN; shost->unique_id = shost->host_no; + shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1; vhost = shost_priv(shost); - INIT_LIST_HEAD(&vhost->sent); - INIT_LIST_HEAD(&vhost->free); INIT_LIST_HEAD(&vhost->targets); + INIT_LIST_HEAD(&vhost->purge); sprintf(vhost->name, IBMVFC_NAME); vhost->host = shost; vhost->dev = dev; vhost->partition_number = -1; vhost->log_level = log_level; vhost->task_set = 1; + + vhost->mq_enabled = mq_enabled; + vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels); + vhost->scsi_scrqs.max_queues = shost->nr_hw_queues; + vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI; + vhost->using_channels = 0; + vhost->do_enquiry = 1; + vhost->scan_timeout = 0; + strcpy(vhost->partition_name, "UNKNOWN"); init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); @@ -4818,6 +6373,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (IS_ERR(vhost->work_thread)) { dev_err(dev, "Couldn't create kernel thread: %ld\n", PTR_ERR(vhost->work_thread)); + rc = PTR_ERR(vhost->work_thread); goto free_host_mem; } @@ -4826,13 +6382,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto kill_kthread; } - if ((rc = ibmvfc_init_event_pool(vhost))) { - dev_err(dev, "Couldn't initialize event pool. 
rc=%d\n", rc); - goto release_crq; - } - if ((rc = scsi_add_host(shost, dev))) - goto release_event_pool; + goto release_crq; fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO; @@ -4842,8 +6393,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) goto remove_shost; } - if (shost_to_fc_host(shost)->rqst_q) - blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); + ibmvfc_init_sub_crqs(vhost); + dev_set_drvdata(dev, vhost); spin_lock(&ibmvfc_driver_lock); list_add_tail(&vhost->queue, &ibmvfc_head); spin_unlock(&ibmvfc_driver_lock); @@ -4855,8 +6406,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) remove_shost: scsi_remove_host(shost); -release_event_pool: - ibmvfc_free_event_pool(vhost); release_crq: ibmvfc_release_crq_queue(vhost); kill_kthread: @@ -4877,9 +6426,10 @@ out: * Return value: * 0 **/ -static int ibmvfc_remove(struct vio_dev *vdev) +static void ibmvfc_remove(struct vio_dev *vdev) { struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev); + LIST_HEAD(purge); unsigned long flags; ENTER; @@ -4890,15 +6440,17 @@ static int ibmvfc_remove(struct vio_dev *vdev) spin_unlock_irqrestore(vhost->host->host_lock, flags); ibmvfc_wait_while_resetting(vhost); - ibmvfc_release_crq_queue(vhost); kthread_stop(vhost->work_thread); fc_remove_host(vhost->host); scsi_remove_host(vhost->host); spin_lock_irqsave(vhost->host->host_lock, flags); ibmvfc_purge_requests(vhost, DID_ERROR); - ibmvfc_free_event_pool(vhost); + list_splice_init(&vhost->purge, &purge); spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_complete_purge(&purge); + ibmvfc_release_sub_crqs(vhost); + ibmvfc_release_crq_queue(vhost); ibmvfc_free_mem(vhost); spin_lock(&ibmvfc_driver_lock); @@ -4906,7 +6458,6 @@ static int ibmvfc_remove(struct vio_dev *vdev) spin_unlock(&ibmvfc_driver_lock); scsi_host_put(vhost->host); LEAVE; - return 0; } /** @@ -4940,17 +6491,19 @@ static int ibmvfc_resume(struct device *dev) */ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev) { - unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu); + unsigned long pool_dma; + + pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu); return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun); } -static struct vio_device_id ibmvfc_device_table[] = { +static const struct vio_device_id ibmvfc_device_table[] = { {"fcp", "IBM,vfc-client"}, { "", "" } }; MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); -static struct dev_pm_ops ibmvfc_pm_ops = { +static const struct dev_pm_ops ibmvfc_pm_ops = { .resume = ibmvfc_resume }; @@ -4996,6 +6549,7 @@ static struct fc_function_template ibmvfc_transport_functions = { .get_starget_port_id = ibmvfc_get_starget_port_id, .show_starget_port_id = 1, + .max_bsg_segments = 1, .bsg_request = ibmvfc_bsg_request, .bsg_timeout = ibmvfc_bsg_timeout, }; @@ -5008,6 +6562,7 @@ static struct fc_function_template ibmvfc_transport_functions = { **/ static int __init ibmvfc_module_init(void) { + int min_max_sectors = PAGE_SIZE >> 9; int rc; if (!firmware_has_feature(FW_FEATURE_VIO)) @@ -5016,6 +6571,16 @@ static int __init ibmvfc_module_init(void) printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n", IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE); + /* + * Range check the max_sectors module parameter. The upper bound is + * implicitly checked since the parameter is a ushort.
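+ * For example (arithmetic added for illustration): with 4K pages min_max_sectors = 4096 >> 9 = 8, and with the 64K pages common on Power it is 65536 >> 9 = 128 512-byte sectors.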
+ */ + if (max_sectors < min_max_sectors) { + printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n", + min_max_sectors); + max_sectors = min_max_sectors; + } + ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions); if (!ibmvfc_transport_template) return -ENOMEM;
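A note on the pattern running through the hunks above: every multi-byte field that crosses the CRQ/MAD interface is big-endian on the wire, so each store is wrapped in cpu_to_be*() and each load in be*_to_cpu(), sized to the field. The following sketch illustrates the convention; the struct is a reduced stand-in whose field widths are taken from the conversions visible in this diff (version/opcode 32-bit, status/length 16-bit), not the full ibmvfc_mad_common definition in ibmvfc.h:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Reduced MAD header for illustration; widths mirror the
	 * cpu_to_be32()/cpu_to_be16() calls in the hunks above. */
	struct mad_common_sketch {
		__be32 version;
		__be32 opcode;
		__be16 status;
		__be16 length;
	};

	static void sketch_init(struct mad_common_sketch *c, u32 opcode, u16 len)
	{
		c->version = cpu_to_be32(1);	/* store: CPU order -> big endian */
		c->opcode = cpu_to_be32(opcode);
		c->length = cpu_to_be16(len);
	}

	static u32 sketch_status(const struct mad_common_sketch *c)
	{
		return be16_to_cpu(c->status);	/* load: big endian -> CPU order */
	}

The width must match the field: calling cpu_to_be32() on a 16-bit member such as length would land the value in the wrong bytes on little-endian hosts, and the __be16/__be32/__be64 annotations exist so sparse can flag exactly that kind of mixup.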
