Diffstat (limited to 'drivers/scsi/qla2xxx/qla_nvme.c')
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 1041
1 file changed, 836 insertions(+), 205 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 39d892bbd219..42eb65a62f1f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -1,18 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
+#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
+static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
+ struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a,
+ bool is_xchg_terminate);
-static void qla_nvme_unregister_remote_port(struct work_struct *);
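+/*
+ * Per-exchange context for an unsolicited FC-NVMe LS request. It tracks
+ * the SRB and the transport LS response descriptor until xmt_ls_rsp
+ * completes.
+ */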
+struct qla_nvme_unsol_ctx {
+ struct list_head elem;
+ struct scsi_qla_host *vha;
+ struct fc_port *fcport;
+ struct srb *sp;
+ struct nvmefc_ls_rsp lsrsp;
+ struct nvmefc_ls_rsp *fd_rsp;
+ struct work_struct lsrsp_work;
+ struct work_struct abort_work;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
+ int comp_status;
+ spinlock_t cmd_lock;
+};
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
@@ -30,7 +48,10 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return 0;
}
- if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
+ if (qla_nvme_register_hba(vha))
+ return 0;
+
+ if (!vha->nvme_local_port)
return 0;
if (!(fcport->nvme_prli_service_param &
@@ -38,14 +59,13 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
+ req.dev_loss_tmo = fcport->dev_loss_tmo;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -72,9 +92,19 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ fcport->dev_loss_tmo);
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
+ ql_log(ql_log_info, vha, 0x212a,
+ "PortID:%06x Supports SLER\n", req.port_id);
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
+ ql_log(ql_log_info, vha, 0x212b,
+ "PortID:%06x Supports PI control\n", req.port_id);
+
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
- list_add_tail(&rport->list, &vha->nvme_rport_list);
fcport->nvme_flag |= NVME_FLAG_REGISTERED;
return 0;
@@ -88,8 +118,9 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
struct qla_qpair *qpair;
- if (!qidx)
- qidx++;
+ /* Map admin queue and 1st IO queue to index 0 */
+ if (qidx)
+ qidx--;
vha = (struct scsi_qla_host *)lport->private;
ha = vha->hw;
@@ -105,73 +136,164 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
return -EINVAL;
}
- if (ha->queue_pair_map[qidx]) {
- *handle = ha->queue_pair_map[qidx];
- ql_log(ql_log_info, vha, 0x2121,
- "Returning existing qpair of %p for idx=%x\n",
- *handle, qidx);
- return 0;
- }
+ /* Use base qpair if max_qpairs is 0 */
+ if (!ha->max_qpairs) {
+ qpair = ha->base_qpair;
+ } else {
+ if (ha->queue_pair_map[qidx]) {
+ *handle = ha->queue_pair_map[qidx];
+ ql_log(ql_log_info, vha, 0x2121,
+ "Returning existing qpair of %p for idx=%x\n",
+ *handle, qidx);
+ return 0;
+ }
- qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
- if (qpair == NULL) {
- ql_log(ql_log_warn, vha, 0x2122,
- "Failed to allocate qpair\n");
- return -EINVAL;
+ qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+ if (!qpair) {
+ ql_log(ql_log_warn, vha, 0x2122,
+ "Failed to allocate qpair\n");
+ return -EINVAL;
+ }
+ qla_adjust_iocb_limit(vha);
}
*handle = qpair;
return 0;
}
-static void qla_nvme_sp_ls_done(void *ptr, int res)
+static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
- srb_t *sp = ptr;
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_fcp_req *fd;
struct srb_iocb *nvme;
- struct nvmefc_ls_req *fd;
- struct nvme_private *priv;
+ unsigned long flags;
- if (atomic_read(&sp->ref_count) == 0) {
- ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
- "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
- return;
+ if (!priv)
+ goto out;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+
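+ /*
+ * Clear the sp<->priv cross-links under cmd_lock so that a concurrent
+ * abort (which takes a reference via kref_get_unless_zero) cannot race
+ * with this teardown.
+ */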
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ if (priv->comp_status == QLA_SUCCESS) {
+ fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
+ fd->status = NVME_SC_SUCCESS;
+ } else {
+ fd->rcv_rsplen = 0;
+ fd->transferred_length = 0;
+ fd->status = NVME_SC_INTERNAL;
}
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd->done(fd);
+out:
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_ls_req *fd;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
- if (!atomic_dec_and_test(&sp->ref_count))
+ fd = priv->fd;
+
+ fd->done(fd, priv->comp_status);
+out:
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+ struct nvme_private *priv =
+ container_of(work, struct nvme_private, ls_work);
+
+ kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
+}
+
+static void qla_nvme_sp_ls_done(srb_t *sp, int res)
+{
+ struct nvme_private *priv = sp->priv;
+
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;
if (res)
res = -EINVAL;
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
- priv = fd->private;
priv->comp_status = res;
+ INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
schedule_work(&priv->ls_work);
- /* work schedule doesn't need the sp */
+}
+
+static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+ struct nvmefc_ls_rsp *fd_rsp;
+ unsigned long flags;
+
+ if (!uctx) {
+ qla2x00_rel_sp(sp);
+ return;
+ }
+
+ spin_lock_irqsave(&uctx->cmd_lock, flags);
+ uctx->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&uctx->cmd_lock, flags);
+
+ fd_rsp = uctx->fd_rsp;
+
+ list_del(&uctx->elem);
+
+ fd_rsp->done(fd_rsp);
+ kfree(uctx);
qla2x00_rel_sp(sp);
}
-static void qla_nvme_sp_done(void *ptr, int res)
+static void qla_nvme_lsrsp_complete(struct work_struct *work)
{
- srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_fcp_req *fd;
+ struct qla_nvme_unsol_ctx *uctx =
+ container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
+ kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
+}
+
+static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
+{
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
- if (!atomic_dec_and_test(&sp->ref_count))
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;
- if (res == QLA_SUCCESS)
- fd->status = 0;
- else
- fd->status = NVME_SC_INTERNAL;
+ if (res)
+ res = -EINVAL;
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
- fd->done(fd);
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ uctx->comp_status = res;
+ INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
+ schedule_work(&uctx->lsrsp_work);
+}
+
+/* It is assumed that the QPair lock is held. */
+static void qla_nvme_sp_done(srb_t *sp, int res)
+{
+ struct nvme_private *priv = sp->priv;
+
+ priv->comp_status = res;
+ kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
return;
}
@@ -183,32 +305,166 @@ static void qla_nvme_abort_work(struct work_struct *work)
srb_t *sp = priv->sp;
fc_port_t *fcport = sp->fcport;
struct qla_hw_data *ha = fcport->vha->hw;
- int rval;
+ int rval, abts_done_called = 1;
+ bool io_wait_for_abort_done;
+ uint32_t handle;
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
+ "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
+ __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
+
+ if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
+ goto out;
+
+ if (ha->flags.host_shutting_down) {
+ ql_log(ql_log_info, sp->fcport->vha, 0xffff,
+ "%s Calling done on sp: %p, type: 0x%x\n",
+ __func__, sp, sp->type);
+ sp->done(sp, 0);
+ goto out;
+ }
+
+ /*
+ * sp may not be valid after abort_command() if the return code is
+ * either SUCCESS or ERR_FROM_FW, so cache the value here.
+ */
+ io_wait_for_abort_done = ql2xabts_wait_nvme &&
+ QLA_ABTS_WAIT_ENABLED(sp);
+ handle = sp->handle;
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
- sp, sp->handle, fcport, rval);
+ sp, handle, fcport, rval);
+
+ /*
+ * If async tmf is enabled, the abort callback is called only on
+ * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
+ */
+ if (ql2xasynctmfenable &&
+ rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
+ abts_done_called = 0;
+
+ /*
+ * Return before decreasing the kref so that the I/O request waits
+ * until the ABTS completes. The kref is decreased in
+ * qla24xx_abort_sp_done().
+ */
+ if (abts_done_called && io_wait_for_abort_done)
+ return;
+out:
+ /* kref_get was done before the work was scheduled. */
+ kref_put(&sp->cmd_kref, sp->put_fn);
+}
+
+static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *fd_resp)
+{
+ struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
+ struct qla_nvme_unsol_ctx, lsrsp);
+ struct qla_nvme_rport *qla_rport = rport->private;
+ fc_port_t *fcport = qla_rport->fcport;
+ struct scsi_qla_host *vha = uctx->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct srb_iocb *nvme;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ uint8_t cnt = 0;
+
+ if (!fcport || fcport->deleted)
+ goto out;
+
+ if (!ha->flags.fw_started)
+ goto out;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto out;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_lsrsp_done;
+ sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
+ sp->priv = (void *)uctx;
+ sp->unsol_rsp = 1;
+ uctx->sp = sp;
+ spin_lock_init(&uctx->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
+ uctx->fd_rsp = fd_resp;
+ nvme->u.nvme.desc = fd_resp;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.timeout_sec = 0;
+ nvme->u.nvme.cmd_dma = fd_resp->rspdma;
+ nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
+ nvme->u.nvme.rsp_len = 0;
+ nvme->u.nvme.rsp_dma = 0;
+ nvme->u.nvme.exchange_address = uctx->exchange_address;
+ nvme->u.nvme.nport_handle = uctx->nport_handle;
+ nvme->u.nvme.ox_id = uctx->ox_id;
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd_resp->rsplen, DMA_TO_DEVICE);
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2122,
+ "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
+ fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
+ uctx->ox_id, uctx->nport_handle);
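+ /* Retry a busy request queue a few times before rejecting the LS. */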
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case -EAGAIN:
+ msleep(PURLS_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < PURLS_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
+ ql_dbg(ql_log_warn, vha, 0x2123,
+ "Failed to xmit Unsol ls response = %d\n", rval);
+ rval = -EIO;
+ qla2x00_rel_sp(sp);
+ goto out;
+ }
+
+ return 0;
+out:
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
+ kfree(uctx);
+ return rval;
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
- INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
- schedule_work(&priv->abort_work);
-}
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
-static void qla_nvme_ls_complete(struct work_struct *work)
-{
- struct nvme_private *priv =
- container_of(work, struct nvme_private, ls_work);
- struct nvmefc_ls_req *fd = priv->fd;
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
- fd->done(fd, priv->comp_status);
+ INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
+ schedule_work(&priv->abort_work);
}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
@@ -223,8 +479,15 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
srb_t *sp;
+ if (!fcport || fcport->deleted)
+ return rval;
+
vha = fcport->vha;
ha = vha->hw;
+
+ if (!ha->flags.fw_started)
+ return rval;
+
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
@@ -233,20 +496,21 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->type = SRB_NVME_LS;
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
- atomic_set(&sp->ref_count, 1);
- nvme = &sp->u.iocb_cmd;
+ sp->put_fn = qla_nvme_release_ls_cmd_kref;
+ sp->priv = priv;
priv->sp = sp;
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
priv->fd = fd;
- INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
- nvme->u.nvme.cmd_len = fd->rqstlen;
- nvme->u.nvme.rsp_len = fd->rsplen;
+ nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+ nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
- nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
- fd->rqstlen, DMA_TO_DEVICE);
+ nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
@@ -254,8 +518,9 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
- wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2x00_rel_sp(sp);
return rval;
}
@@ -267,6 +532,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
struct nvmefc_fcp_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
@@ -276,48 +553,57 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
struct cmd_nvme *cmd_pkt;
uint16_t cnt, i;
uint16_t req_cnt;
uint16_t tot_dsds;
uint16_t avail_dsds;
- uint32_t *cur_dsd;
+ struct dsd64 *cur_dsd;
struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
struct srb_iocb *nvme = &sp->u.iocb_cmd;
struct scatterlist *sgl, *sg;
struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+ struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
uint32_t rval = QLA_SUCCESS;
/* Setup qpair pointers */
req = qpair->req;
+ rsp = qpair->rsp;
tot_dsds = fd->sg_cnt;
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0) {
+ rval = -EBUSY;
+ goto queuing_error;
}
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (index == req->num_outstanding_cmds) {
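+ /* Reserve IOCB and exchange resources against the firmware limits. */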
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
rval = -EBUSY;
goto queuing_error;
}
- req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
if (req->cnt < (req_cnt + 2)) {
- cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
+ rval = -EBUSY;
+ goto queuing_error;
+ }
+ }
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -331,7 +617,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
if (unlikely(!fd->sqid)) {
- struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&ha->nvme_active_aen_cnt);
@@ -345,7 +630,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->cnt -= req_cnt;
cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+ cmd_pkt->handle = make_handle(req->id, handle);
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
@@ -358,17 +643,33 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* No data transfer how do we check buffer len == 0?? */
if (fd->io_dir == NVMEFC_FCP_READ) {
- cmd_pkt->control_flags =
- cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
- vha->qla_stats.input_bytes += fd->payload_length;
- vha->qla_stats.input_requests++;
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+ qpair->counters.input_bytes += fd->payload_length;
+ qpair->counters.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
- cmd_pkt->control_flags =
- cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
- vha->qla_stats.output_bytes += fd->payload_length;
- vha->qla_stats.output_requests++;
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
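+ /*
+ * FC-NVMe first burst: write data may be sent without waiting
+ * for XFER_RDY when the payload fits in the negotiated first
+ * burst size (a size of 0 means no limit).
+ */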
+ if ((vha->flags.nvme_first_burst) &&
+ (sp->fcport->nvme_prli_service_param &
+ NVME_PRLI_SP_FIRST_BURST)) {
+ if ((fd->payload_length <=
+ sp->fcport->nvme_first_burst_size) ||
+ (sp->fcport->nvme_first_burst_size == 0))
+ cmd_pkt->control_flags |=
+ cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
+ }
+ qpair->counters.output_bytes += fd->payload_length;
+ qpair->counters.output_requests++;
} else if (fd->io_dir == 0) {
- cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+ cmd_pkt->control_flags = 0;
+ }
+
+ if (sp->fcport->edif.enable && fd->io_dir != 0)
+ cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+
+ /* Set BIT_13 of control flags for Async event */
+ if (vha->flags.nvme2_enabled &&
+ cmd->sqe.common.opcode == nvme_admin_async_event) {
+ cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
}
/* Set NPORT-ID */
@@ -380,25 +681,22 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* NVME RSP IU */
cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
- cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
- cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+ put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
/* NVME CNMD IU */
cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
- cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
- cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+ cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
/* One DSD is available in the Command Type NVME IOCB */
avail_dsds = 1;
- cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+ cur_dsd = &cmd_pkt->nvme_dsd;
sgl = fd->first_sgl;
/* Load data segments */
for_each_sg(sgl, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
cont_a64_entry_t *cont_pkt;
/* Allocate additional continuation packets? */
@@ -417,17 +715,14 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->ring_ptr++;
}
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
- *((uint32_t *)(&cont_pkt->entry_type)) =
- cpu_to_le32(CONTINUE_A64_TYPE);
+ put_unaligned_le32(CONTINUE_A64_TYPE,
+ &cont_pkt->entry_type);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
+ cur_dsd = cont_pkt->dsd;
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
}
@@ -444,11 +739,22 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req->ring_ptr++;
}
+ /* ignore nvme async cmd due to long timeout */
+ if (!nvme->u.nvme.aen_op)
+ sp->qpair->cmd_cnt++;
+
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
queuing_error:
+ if (rval)
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
return rval;
}
@@ -460,18 +766,31 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fc_port_t *fcport;
struct srb_iocb *nvme;
struct scsi_qla_host *vha;
- int rval = -ENODEV;
+ struct qla_hw_data *ha;
+ int rval;
srb_t *sp;
struct qla_qpair *qpair = hw_queue_handle;
struct nvme_private *priv = fd->private;
struct qla_nvme_rport *qla_rport = rport->private;
+ if (!priv) {
+ /* nvme association has been torn down */
+ return -ENODEV;
+ }
+
fcport = qla_rport->fcport;
+ if (unlikely(!qpair || !fcport || fcport->deleted))
+ return -EBUSY;
+
+ if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ return -ENODEV;
+
vha = fcport->vha;
+ ha = vha->hw;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
- return rval;
+ return -EBUSY;
/*
* If we know the dev is going away while the transport is still sending
@@ -483,33 +802,47 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (fcport->nvme_flag & NVME_FLAG_RESETTING)
return -EBUSY;
+ qpair = qla_mapq_nvme_select_qpair(ha, qpair);
+
/* Alloc SRB structure */
sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
return -EBUSY;
- atomic_set(&sp->ref_count, 1);
- init_waitqueue_head(&sp->nvme_ls_waitq);
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ sp->priv = priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
sp->done = qla_nvme_sp_done;
+ sp->put_fn = qla_nvme_release_fcp_cmd_kref;
sp->qpair = qpair;
sp->vha = vha;
+ sp->cmd_sp = sp;
nvme = &sp->u.iocb_cmd;
nvme->u.nvme.desc = fd;
rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x212d,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
- wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
return rval;
}
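+/* Spread the NVMe hw queues across the PCI device's IRQ affinity. */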
+static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
+ struct blk_mq_queue_map *map)
+{
+ struct scsi_qla_host *vha = lport->private;
+
+ blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
+}
+
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
@@ -523,29 +856,16 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
fc_port_t *fcport;
- struct qla_nvme_rport *qla_rport = rport->private, *trport;
+ struct qla_nvme_rport *qla_rport = rport->private;
fcport = qla_rport->fcport;
fcport->nvme_remote_port = NULL;
fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- list_del(&qla_rport->list);
- break;
- }
- }
- complete(&fcport->nvme_del_done);
-
- if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
- INIT_WORK(&fcport->free_work, qlt_free_session_done);
- schedule_work(&fcport->free_work);
- }
-
fcport->nvme_flag &= ~NVME_FLAG_DELETING;
ql_log(ql_log_info, fcport->vha, 0x2110,
- "remoteport_delete of %p completed.\n", fcport);
+ "remoteport_delete of %p %8phN completed.\n",
+ fcport, fcport->port_name);
+ complete(&fcport->nvme_del_done);
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
@@ -557,8 +877,10 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
- .max_hw_queues = 8,
- .max_sgl_segments = 128,
+ .xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
+ .map_queues = qla_nvme_map_queues,
+ .max_hw_queues = DEF_NVME_HW_QUEUES,
+ .max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
.dma_boundary = 0xFFFFFFFF,
.local_priv_sz = 8,
@@ -567,79 +889,36 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcprqst_priv_sz = sizeof(struct nvme_private),
};
-#define NVME_ABORT_POLLING_PERIOD 2
-static int qla_nvme_wait_on_command(srb_t *sp)
-{
- int ret = QLA_SUCCESS;
-
- wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
- NVME_ABORT_POLLING_PERIOD*HZ);
-
- if (atomic_read(&sp->ref_count) > 1)
- ret = QLA_FUNCTION_FAILED;
-
- return ret;
-}
-
-void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
-{
- int rval;
-
- if (ha->flags.fw_started) {
- rval = ha->isp_ops->abort_command(sp);
- if (!rval && !qla_nvme_wait_on_command(sp))
- ql_log(ql_log_warn, NULL, 0x2112,
- "timed out waiting on sp=%p\n", sp);
- } else {
- sp->done(sp, res);
- }
-}
-
-static void qla_nvme_unregister_remote_port(struct work_struct *work)
+void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
- struct fc_port *fcport = container_of(work, struct fc_port,
- nvme_del_work);
- struct qla_nvme_rport *qla_rport, *trport;
+ int ret;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
- ql_log(ql_log_warn, NULL, 0x2112,
- "%s: unregister remoteport on %p\n",__func__, fcport);
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- ql_log(ql_log_info, fcport->vha, 0x2113,
- "%s: fcport=%p\n", __func__, fcport);
- init_completion(&fcport->nvme_del_done);
- nvme_fc_unregister_remoteport(
- fcport->nvme_remote_port);
- wait_for_completion(&fcport->nvme_del_done);
- break;
- }
- }
+ ql_log(ql_log_warn, fcport->vha, 0x2112,
+ "%s: unregister remoteport on %p %8phN\n",
+ __func__, fcport, fcport->port_name);
+
+ if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+
+ init_completion(&fcport->nvme_del_done);
+ ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+ if (ret)
+ ql_log(ql_log_info, fcport->vha, 0x2114,
+ "%s: Failed to unregister nvme_remote_port (%d)\n",
+ __func__, ret);
+ wait_for_completion(&fcport->nvme_del_done);
}
void qla_nvme_delete(struct scsi_qla_host *vha)
{
- struct qla_nvme_rport *qla_rport, *trport;
- fc_port_t *fcport;
int nv_ret;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
- list_for_each_entry_safe(qla_rport, trport,
- &vha->nvme_rport_list, list) {
- fcport = qla_rport->fcport;
-
- ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
- __func__, fcport);
-
- nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
- }
-
if (vha->nvme_local_port) {
init_completion(&vha->nvme_del_done);
ql_log(ql_log_info, vha, 0x2116,
@@ -659,7 +938,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret = EINVAL;
+ int ret = -EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
@@ -667,25 +946,51 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
- WARN_ON(vha->nvme_local_port);
- WARN_ON(ha->max_req_queues < 3);
+ if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
+ ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
+ ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+ } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
+ ql2xnvme_queues, (ha->max_qpairs - 1),
+ (ha->max_qpairs - 1));
+ ql2xnvme_queues = ((ha->max_qpairs - 1));
+ }
qla_nvme_fc_transport.max_hw_queues =
- min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
- (uint8_t)(ha->max_req_queues - 2));
+ min((uint8_t)(ql2xnvme_queues),
+ (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
+
+ ql_log(ql_log_info, vha, 0xfffb,
+ "Number of NVME queues used for this port: %d\n",
+ qla_nvme_fc_transport.max_hw_queues);
pinfo.node_name = wwn_to_u64(vha->node_name);
pinfo.port_name = wwn_to_u64(vha->port_name);
pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
pinfo.port_id = vha->d_id.b24;
- ql_log(ql_log_info, vha, 0xffff,
- "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
- pinfo.node_name, pinfo.port_name, pinfo.port_id);
- qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
-
- ret = nvme_fc_register_localport(&pinfo, tmpl,
- get_device(&ha->pdev->dev), &vha->nvme_local_port);
+ mutex_lock(&ha->vport_lock);
+ /*
+ * Check again for nvme_local_port to see if any other thread raced
+ * with this one and finished registration.
+ */
+ if (!vha->nvme_local_port) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+ pinfo.node_name, pinfo.port_name, pinfo.port_id);
+ qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+ ret = nvme_fc_register_localport(&pinfo, tmpl,
+ get_device(&ha->pdev->dev),
+ &vha->nvme_local_port);
+ mutex_unlock(&ha->vport_lock);
+ } else {
+ mutex_unlock(&ha->vport_lock);
+ return 0;
+ }
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
@@ -695,3 +1000,329 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
return ret;
}
+
+void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+ struct qla_hw_data *ha;
+
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+
+ ha = orig_sp->fcport->vha->hw;
+
+ WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
+ /* Use Driver Specified Retry Count */
+ abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
+ abt->drv.abts_rty_cnt = cpu_to_le16(2);
+ /* Use specified response timeout */
+ abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
+ /* set it to 2 * r_a_tov in secs */
+ abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
+}
+
+void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+ u16 comp_status;
+ struct scsi_qla_host *vha;
+
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+
+ vha = orig_sp->fcport->vha;
+
+ comp_status = le16_to_cpu(abt->comp_status);
+ switch (comp_status) {
+ case CS_RESET: /* reset event aborted */
+ case CS_ABORTED: /* IOCB was cleaned */
+ case CS_TIMEOUT:
+ /* N_Port handle is not currently logged in */
+ case CS_PORT_UNAVAILABLE:
+ /* N_Port handle was logged out while waiting for ABTS to complete */
+ case CS_PORT_LOGGED_OUT:
+ /* Firmware found that the port name changed */
+ case CS_PORT_CONFIG_CHG:
+ ql_dbg(ql_dbg_async, vha, 0xf09d,
+ "Abort I/O IOCB completed with error, comp_status=%x\n",
+ comp_status);
+ break;
+
+ /* BA_RJT was received for the ABTS */
+ case CS_REJECT_RECEIVED:
+ ql_dbg(ql_dbg_async, vha, 0xf09e,
+ "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
+ abt->fw.ba_rjt_vendorUnique);
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
+ "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
+ abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
+ break;
+
+ case CS_COMPLETE:
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
+ "IOCB request is completed successfully comp_status=%x\n",
+ comp_status);
+ break;
+
+ case CS_IOCB_ERROR:
+ ql_dbg(ql_dbg_async, vha, 0xf0a0,
+ "IOCB request is failed, comp_status=%x\n", comp_status);
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xf0a1,
+ "Invalid Abort IO IOCB Completion Status %x\n",
+ comp_status);
+ break;
+ }
+}
+
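+/* Drop the I/O kref that was held while waiting for the ABTS to complete. */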
+inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
+{
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+ kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
+}
+
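+/* Build an FC-NVMe LS_RJT payload for the given LS command. */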
+static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
+ u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
+ rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
+ rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ rjt->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ rjt->rqst.w0.ls_cmd = ls_cmd;
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+}
+
+static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
+ struct pt_ls4_request *lsrjt_iocb,
+ struct qla_nvme_lsrjt_pt_arg *a)
+{
+ lsrjt_iocb->entry_type = PT_LS4_REQUEST;
+ lsrjt_iocb->entry_count = 1;
+ lsrjt_iocb->sys_define = 0;
+ lsrjt_iocb->entry_status = 0;
+ lsrjt_iocb->handle = QLA_SKIP_HANDLE;
+ lsrjt_iocb->nport_handle = a->nport_handle;
+ lsrjt_iocb->exchange_address = a->xchg_address;
+ lsrjt_iocb->vp_index = a->vp_idx;
+
+ lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
+ lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
+ lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
+ lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+
+ put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
+ lsrjt_iocb->dsd[1].length = 0;
+ lsrjt_iocb->rx_dseg_count = 0;
+ lsrjt_iocb->rx_byte_count = 0;
+}
+
+static int
+qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
+{
+ struct pt_ls4_request *lsrjt_iocb;
+
+ lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!lsrjt_iocb) {
+ ql_log(ql_log_warn, vha, 0x210e,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (!is_xchg_terminate) {
+ qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
+ a->reason, a->explanation, 0);
+
+ a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
+ a->tx_addr = vha->hw->lsrjt.cdma;
+ a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x211f,
+ "Sending nvme fc ls reject ox_id %04x op %04x\n",
+ a->ox_id, a->opcode);
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
+ vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
+ } else {
+ a->tx_byte_count = 0;
+ a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
+ ql_dbg(ql_dbg_unsol, vha, 0x2110,
+ "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
+ }
+
+ qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+/*
+ * qla2xxx_process_purls_pkt() - Pass an unsolicited received FC-NVMe
+ * Link Service pkt up to nvme_fc_rcv_ls_req().
+ * The LLDD needs to provide memory for the response buffer, which is
+ * used to reference the exchange corresponding to the LS when issuing
+ * an LS response. The LLDD frees the response buffer in
+ * lport->ops->xmt_ls_rsp().
+ *
+ * @vha: SCSI qla host
+ * @item: ptr to purex_item
+ */
+static void
+qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
+{
+ struct qla_nvme_unsol_ctx *uctx = item->purls_context;
+ struct qla_nvme_lsrjt_pt_arg a;
+ int ret = 1;
+
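+ /* When CONFIG_NVME_FC is disabled, ret stays nonzero and the LS is rejected. */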
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
+ &item->iocb, item->size);
+#endif
+ if (ret) {
+ ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
+ list_del(&uctx->elem);
+ kfree(uctx);
+ }
+}
+
+static scsi_qla_host_t *
+qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
+{
+ scsi_qla_host_t *base_vha, *vha, *tvp;
+ unsigned long flags;
+
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!vp_index && !ha->num_vhosts)
+ return base_vha;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
+ if (vha->vp_idx == vp_index) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return vha;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return NULL;
+}
+
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
+{
+ struct nvme_fc_remote_port *rport;
+ struct qla_nvme_rport *qla_rport;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct pt_ls4_rx_unsol *p = *pkt;
+ struct qla_nvme_unsol_ctx *uctx;
+ struct rsp_que *rsp_q = *rsp;
+ struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
+ fc_port_t *fcport = NULL;
+ struct purex_item *item;
+ port_id_t d_id = {0};
+ port_id_t id = {0};
+ u8 *opcode;
+ bool xmt_reject = false;
+
+ ha = rsp_q->hw;
+
+ vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
+ if (!vha) {
+ ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ memset((void *)&a, 0, sizeof(a));
+ opcode = (u8 *)&p->payload[0];
+ a.opcode = opcode[3];
+ a.vp_idx = p->vp_index;
+ a.nport_handle = p->nport_handle;
+ a.ox_id = p->ox_id;
+ a.xchg_address = p->exchange_address;
+
+ id.b.domain = p->s_id.domain;
+ id.b.area = p->s_id.area;
+ id.b.al_pa = p->s_id.al_pa;
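+ /* d_id bytes are stored LSB-first in the IOCB. */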
+ d_id.b.domain = p->d_id[2];
+ d_id.b.area = p->d_id[1];
+ d_id.b.al_pa = p->d_id[0];
+
+ fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
+ if (!fcport) {
+ ql_dbg(ql_dbg_unsol, vha, 0x211e,
+ "Failed to find sid=%06x did=%06x\n",
+ id.b24, d_id.b24);
+ a.reason = FCNVME_RJT_RC_INV_ASSOC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+ rport = fcport->nvme_remote_port;
+ qla_rport = rport->private;
+
+ item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
+ if (!item) {
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+
+ uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
+ if (!uctx) {
+ ql_log(ql_log_info, vha, 0x2126, "Failed to allocate memory\n");
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ qla24xx_free_purex_item(item);
+ goto out;
+ }
+
+ uctx->vha = vha;
+ uctx->fcport = fcport;
+ uctx->exchange_address = p->exchange_address;
+ uctx->nport_handle = p->nport_handle;
+ uctx->ox_id = p->ox_id;
+ qla_rport->uctx = uctx;
+ INIT_LIST_HEAD(&uctx->elem);
+ list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
+ item->purls_context = (void *)uctx;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2121,
+ "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
+ item->iocb.iocb[3], item->size, uctx->exchange_address,
+ fcport->d_id.b24);
+ /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * ----- -----------------------------------------------
+ * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
+ * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
+ * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ */
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
+ &item->iocb, item->size);
+
+ qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
+out:
+ if (xmt_reject) {
+ qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
+ __qla_consume_iocb(vha, pkt, rsp);
+ }
+}