Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 2244
1 file changed, 1391 insertions(+), 853 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6630520d295c..d61d979f9b77 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -24,6 +26,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
+#include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -84,25 +88,16 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
- struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+ struct bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -125,6 +120,16 @@ enum ELX_LOOPBACK_CMD {
#define ELX_LOOPBACK_HEADER_SZ \
(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+/* For non-embedded read object command */
+#define READ_OBJ_EMB0_SCHEME_0 {1, 10, 256, 128}
+#define READ_OBJ_EMB0_SCHEME_1 {11, LPFC_EMB0_MAX_RD_OBJ_HBD_CNT, 512, 192}
+static const struct lpfc_read_object_cmd_scheme {
+ u32 min_hbd_cnt;
+ u32 max_hbd_cnt;
+ u32 cmd_size;
+ u32 payload_word_offset;
+} rd_obj_scheme[2] = {READ_OBJ_EMB0_SCHEME_0, READ_OBJ_EMB0_SCHEME_1};
+
struct lpfc_dmabufext {
struct lpfc_dmabuf dma;
uint32_t size;
@@ -139,8 +144,8 @@ lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
if (mlist) {
list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
list) {
- lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
list_del(&mlast->list);
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
kfree(mlast);
}
lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
@@ -211,7 +216,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
- struct fc_bsg_buffer *bsg_buffers,
+ struct bsg_buffer *bsg_buffers,
unsigned int bytes_to_transfer, int to_buffers)
{
@@ -297,39 +302,47 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
- IOCB_t *rsp;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
- unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
iocb = &dd_data->context_un.iocb;
- ndlp = iocb->ndlp;
+ ndlp = iocb->cmdiocbq->ndlp;
rmp = iocb->rmp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
/* Copy the completed data or set the error status */
if (job) {
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -344,10 +357,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
+ total_data_placed, 0);
}
}
@@ -355,15 +367,16 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
kfree(dd_data);
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -373,26 +386,34 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
- uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
- IOCB_t *cmd;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
+ int request_nseg, reply_nseg;
+ u32 num_entry;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
+ u16 ulp_context;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) ||
+ test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) ||
+ test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) ||
+ test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) ||
+ test_bit(NLP_RNID_SND, &ndlp->nlp_flag))
+ return -ENODEV;
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -403,24 +424,12 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
goto no_dd_data;
}
- if (!lpfc_nlp_get(ndlp)) {
- rc = -ENODEV;
- goto no_ndlp;
- }
-
- if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
- rc = -ENODEV;
- goto free_ndlp;
- }
-
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
- goto free_ndlp;
+ goto free_dd;
}
- cmd = &cmdiocbq->iocb;
-
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
@@ -454,39 +463,28 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
goto free_cmp;
}
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = ndlp->nlp_rpi;
+ num_entry = request_nseg + reply_nseg;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- cmd->ulpOwner = OWN_CHIP;
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
+
+ lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
+ phba->fc_ratov * 2);
+
+ cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
- cmdiocbq->context3 = bmp;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- timeout = phba->fc_ratov * 2;
- cmd->ulpTimeout = timeout;
-
- cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
- cmdiocbq->context_un.ndlp = ndlp;
+ cmdiocbq->cmd_dmabuf = cmp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+
+ cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->context_un.dd_data = dd_data;
+
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
- dd_data->context_un.iocb.ndlp = ndlp;
dd_data->context_un.iocb.rmp = rmp;
job->dd_data = dd_data;
@@ -500,15 +498,30 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp) {
+ rc = -ENODEV;
+ goto free_rmp;
+ }
+
iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (iocb_stat == IOCB_SUCCESS)
+ if (iocb_stat == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed yet */
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
- else if (iocb_stat == IOCB_BUSY)
+ } else if (iocb_stat == IOCB_BUSY) {
rc = -EAGAIN;
- else
+ } else {
rc = -EIO;
+ }
/* iocb failed so cleanup */
+ lpfc_nlp_put(ndlp);
free_rmp:
lpfc_free_bsg_buffers(phba, rmp);
@@ -520,13 +533,11 @@ free_bmp:
kfree(bmp);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
-free_ndlp:
- lpfc_nlp_put(ndlp);
-no_ndlp:
+free_dd:
kfree(dd_data);
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -554,8 +565,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
- IOCB_t *rsp;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
@@ -563,22 +574,31 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
ndlp = dd_data->context_un.iocb.ndlp;
- cmdiocbq->context1 = ndlp;
+ cmdiocbq->ndlp = ndlp;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- rsp = &rspiocbq->iocb;
- pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
+ pcmd = cmdiocbq->cmd_dmabuf;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
/* Copy the completed job data or determine the job status if job is
@@ -586,38 +606,44 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
*/
if (job) {
- if (rsp->ulpStatus == IOSTAT_SUCCESS) {
- rsp_size = rsp->un.elsreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
+ if (ulp_status == IOSTAT_SUCCESS) {
+ rsp_size = total_data_placed;
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
- } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- job->reply->reply_payload_rcv_len =
+ } else if (ulp_status == IOSTAT_LS_RJT) {
+ bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
- rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
- els_reply = &job->reply->reply_data.ctels_reply;
+ rjt_data = (uint8_t *)&ulp_word4;
+ els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
els_reply->rjt_data.reason_explanation = rjt_data[1];
els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT) {
+ rc = -ETIMEDOUT;
} else {
rc = -EIO;
}
}
- lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, cmdiocbq);
+
+ lpfc_nlp_put(ndlp);
kfree(dd_data);
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -627,23 +653,25 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata = job->rport->dd_data;
+ struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t elscmd;
uint32_t cmdsize;
- uint32_t rspsize;
struct lpfc_iocbq *cmdiocbq;
uint16_t rpi = 0;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* verify the els command is not greater than the
* maximum ELS transfer size.
@@ -663,9 +691,8 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto no_dd_data;
}
- elscmd = job->request->rqst_data.r_els.els_code;
+ elscmd = bsg_request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
- rspsize = job->reply_payload.payload_len;
if (!lpfc_nlp_get(ndlp)) {
rc = -ENODEV;
@@ -677,7 +704,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
* we won't be dma into memory that is no longer allocated to for the
* request.
*/
-
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
@@ -685,23 +711,23 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto release_ndlp;
}
- rpi = ndlp->nlp_rpi;
-
/* Transfer the request payload to allocated command dma buffer */
-
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
- ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+ cmdiocbq->cmd_dmabuf->virt,
cmdsize);
+ rpi = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
+ phba->sli4_hba.rpi_ids[rpi]);
else
cmdiocbq->iocb.ulpContext = rpi;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context_un.ndlp = ndlp;
- cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context_un.dd_data = dd_data;
+ cmdiocbq->ndlp = ndlp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -720,17 +746,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-
- if (rc == IOCB_SUCCESS)
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
- else if (rc == IOCB_BUSY)
+ } else if (rc == IOCB_BUSY) {
rc = -EAGAIN;
- else
+ } else {
rc = -EIO;
+ }
-linkdown_err:
+ /* I/O issue failed. Cleanup resources. */
- cmdiocbq->context1 = ndlp;
+linkdown_err:
lpfc_els_free_iocb(phba, cmdiocbq);
release_ndlp:
@@ -741,7 +774,7 @@ free_dd_data:
no_dd_data:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -859,11 +892,8 @@ diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
return 0;
}
-/**
+/*
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
- * @phba:
- * @pring:
- * @piocbq:
*
* This function is called when an unsolicited CT command is received. It
* forwards the event to any processes registered to receive CT events.
@@ -873,52 +903,33 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocbq)
{
uint32_t evt_req_id = 0;
- uint32_t cmd;
- uint32_t len;
+ u16 cmd;
struct lpfc_dmabuf *dmabuf = NULL;
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
+ IOCB_t *iocb = NULL;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
- struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
- struct lpfc_hbq_entry *hbqe;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
struct lpfc_sli_ct_request *ct_req;
- struct fc_bsg_job *job = NULL;
+ struct bsg_job *job = NULL;
+ struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
+ u32 bde_count = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
- if (piocbq->iocb.ulpBdeCount == 0 ||
- piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
- goto error_ct_unsol_exit;
-
- if (phba->link_state == LPFC_HBA_ERROR ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
- goto error_ct_unsol_exit;
-
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
- dmabuf = bdeBuf1;
- else {
- dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
- piocbq->iocb.un.cont64[0].addrLow);
- dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
- }
- if (dmabuf == NULL)
- goto error_ct_unsol_exit;
- ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+ ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
evt_req_id = ct_req->FsType;
- cmd = ct_req->CommandResponse.bits.CmdRsp;
- len = ct_req->CommandResponse.bits.Size;
- if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
+ cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
@@ -941,12 +952,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
- evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
+ else
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ iocb = &iocbq->iocb;
+ for (i = 0; i < iocb->ulpBdeCount;
+ i++)
evt_dat->len +=
- iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ iocb->un.cont64[i].tus.f.bdeSize;
}
}
@@ -966,22 +982,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- bdeBuf1 = iocbq->context2;
- bdeBuf2 = iocbq->context3;
+ bdeBuf1 = iocbq->cmd_dmabuf;
+ bdeBuf2 = iocbq->bpl_dmabuf;
}
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocbq->wcqe_cmpl.word3;
+ else
+ bde_count = iocbq->iocb.ulpBdeCount;
+ for (i = 0; i < bde_count; i++) {
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.un.ulpWord[0];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.unsli3.
- sli3Words[4];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->unsol_rcv_len;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
@@ -998,7 +1013,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!dmabuf) {
lpfc_printf_log(phba, KERN_ERR,
LOG_LIBDFC, "2616 No dmabuf "
- "found for iocbq 0x%p\n",
+ "found for iocbq x%px\n",
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
@@ -1035,17 +1050,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_in_buf_free(phba,
dmabuf);
} else {
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
}
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
break;
}
}
@@ -1068,14 +1083,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->ct_ctx[
evt_dat->immed_dat].SID);
phba->ct_ctx[evt_dat->immed_dat].rxid =
- piocbq->iocb.ulpContext;
+ get_job_ulpcontext(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.unsli3.rcvsli3.ox_id;
+ get_job_rcvoxid(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].SID =
- piocbq->iocb.un.rcvels.remoteID;
+ bf_get(wqe_els_did,
+ &piocbq->wqe.xmit_els_rsp.wqe_dest);
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
- evt_dat->immed_dat = piocbq->iocb.ulpContext;
+ evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
@@ -1092,13 +1108,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dd_data->set_job = NULL;
lpfc_bsg_event_unref(evt);
if (job) {
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply = job->reply;
+ bsg_reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
/* complete the job back to userspace */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
}
}
@@ -1159,10 +1177,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
* @job: SET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
struct set_ct_event *event_req;
struct lpfc_bsg_event *evt;
int rc = 0;
@@ -1180,7 +1199,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
}
event_req = (struct set_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
FC_REG_EVENT_MASK);
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1231,9 +1250,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
return 0; /* call job done later */
job_error:
- if (dd_data != NULL)
- kfree(dd_data);
-
+ kfree(dd_data);
job->dd_data = NULL;
return rc;
}
@@ -1243,13 +1260,15 @@ job_error:
* @job: GET_EVENT fc_bsg_job
**/
static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
- struct lpfc_bsg_event *evt;
+ struct lpfc_bsg_event *evt, *evt_next;
struct event_data *evt_dat = NULL;
unsigned long flags;
uint32_t rc = 0;
@@ -1264,12 +1283,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
}
event_req = (struct get_ct_event *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
event_reply = (struct get_ct_event_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
- list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+ list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get))
break;
@@ -1287,7 +1306,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
* an error indicating that there isn't anymore
*/
if (evt_dat == NULL) {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
goto job_error;
}
@@ -1303,12 +1322,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
if (evt_dat->len > 0)
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (evt_dat) {
kfree(evt_dat->data);
@@ -1319,13 +1338,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
lpfc_bsg_event_unref(evt);
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->dd_data = NULL;
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return 0;
job_error:
job->dd_data = NULL;
- job->reply->result = rc;
+ bsg_reply->result = rc;
return rc;
}
@@ -1352,14 +1372,15 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct lpfc_iocbq *rspiocbq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
- IOCB_t *rsp;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
+ u32 ulp_status, ulp_word4;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1370,17 +1391,25 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
}
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* Close the timeout handler abort window */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
ndlp = dd_data->context_un.iocb.ndlp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
/* Copy the completed job data or set the error status */
if (job) {
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ bsg_reply = job->reply;
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -1395,7 +1424,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
}
}
@@ -1409,8 +1438,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -1420,20 +1450,31 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @job: Pointer to the job object.
* @tag: tag index value into the ports context exchange array.
- * @bmp: Pointer to a dma buffer descriptor.
+ * @cmp: Pointer to a cmp dma buffer descriptor.
+ * @bmp: Pointer to a bmp dma buffer descriptor.
* @num_entry: Number of enties in the bde.
**/
static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
- IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
+ unsigned long flags;
uint32_t creg_val;
+ u16 ulp_context, iotag;
+
+ ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2721 ndlp null for oxid %x SID %x\n",
+ phba->ct_ctx[tag].rxid,
+ phba->ct_ctx[tag].SID);
+ return IOCB_ERROR;
+ }
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -1451,82 +1492,53 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
goto no_ctiocb;
}
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- /* Fill in rest of iocb */
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].rxid;
- icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
- ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
- if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2721 ndlp null for oxid %x SID %x\n",
- icmd->ulpContext,
- phba->ct_ctx[tag].SID);
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
- /* Check if the ndlp is active */
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- /* get a refernece count so the ndlp doesn't go away while
- * we respond
- */
- if (!lpfc_nlp_get(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ phba->ct_ctx[tag].oxid, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].valid = UNSOL_INVALID;
- } else
- icmd->ulpContext = (ushort) tag;
+ iotag = get_wqe_reqtag(ctiocb);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ ctiocb->num_bdes = num_entry;
+ iotag = ctiocb->iocb.ulpIoTag;
+ }
- icmd->ulpTimeout = phba->fc_ratov * 2;
+ ulp_context = get_job_ulpcontext(phba, ctiocb);
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ ulp_context, iotag, tag, phba->link_state);
- ctiocb->iocb_cmpl = NULL;
- ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
- ctiocb->context1 = dd_data;
- ctiocb->context2 = cmp;
- ctiocb->context3 = bmp;
- ctiocb->context_un.ndlp = ndlp;
- ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+ ctiocb->context_un.dd_data = dd_data;
+ ctiocb->cmd_dmabuf = cmp;
+ ctiocb->bpl_dmabuf = bmp;
+ ctiocb->ndlp = ndlp;
+ ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = ctiocb;
- dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
+ if (!dd_data->context_un.iocb.ndlp) {
+ rc = -IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
dd_data->context_un.iocb.rmp = NULL;
job->dd_data = dd_data;
@@ -1541,9 +1553,20 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
-
- if (rc == IOCB_SUCCESS)
+ if (rc == IOCB_SUCCESS) {
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O had not been completed/released */
+ if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
+ /* open up abort window to timeout handler */
+ ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
+ }
+
+ /* iocb failed so cleanup */
+ job->dd_data = NULL;
+ lpfc_nlp_put(ndlp);
issue_ct_rsp_exit:
lpfc_sli_release_iocbq(phba, ctiocb);
@@ -1558,12 +1581,14 @@ no_dd_data:
* @job: SEND_MGMT_RESP fc_bsg_job
**/
static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
int bpl_entries;
@@ -1573,7 +1598,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
int rc = 0;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
rc = -ERANGE;
@@ -1619,7 +1644,7 @@ send_mgmt_rsp_free_bmp:
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
return rc;
}
@@ -1637,6 +1662,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct lpfc_sli *psli;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
int i = 0;
@@ -1644,9 +1670,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
if (!psli)
return -ENODEV;
- pring = &psli->ring[LPFC_FCP_RING];
- if (!pring)
- return -ENODEV;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -1665,10 +1688,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
scsi_block_requests(shost);
}
- while (!list_empty(&pring->txcmplq)) {
- if (i++ > 500) /* wait up to 5 seconds */
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ pring = &psli->sli3_ring[LPFC_FCP_RING];
+ lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
+ return 0;
+ }
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring || (pring->ringno != LPFC_FCP_RING))
+ continue;
+ if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+ &pring->ring_lock))
break;
- msleep(10);
}
return 0;
}
@@ -1715,8 +1746,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
* All of this is done in-line.
*/
static int
-lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
@@ -1726,7 +1759,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
int rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -1746,7 +1779,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
/* bring the link to diagnostic mode */
loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
@@ -1819,10 +1852,11 @@ loopback_mode_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -1889,14 +1923,17 @@ link_diag_state_set_out:
}
/**
- * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
* @phba: Pointer to HBA context object.
+ * @mode: loopback mode to set
+ * @link_no: link number for loopback mode to set
*
* This function is responsible for issuing a sli4 mailbox command for setting
- * up internal loopback diagnostic.
+ * up loopback diagnostic for a link.
*/
static int
-lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
+ uint32_t link_no)
{
LPFC_MBOXQ_t *pmboxq;
uint32_t req_len, alloc_len;
@@ -1917,11 +1954,19 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
}
link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
bf_set(lpfc_mbx_set_diag_state_link_num,
- &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
- bf_set(lpfc_mbx_set_diag_state_link_type,
- &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+ &link_diag_loopback->u.req, link_no);
+
+ if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
+ } else {
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req,
+ phba->sli4_hba.lnk_info.lnk_tp);
+ }
+
bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+ mode);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -1946,9 +1991,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- int rc;
-
- if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
@@ -1957,8 +2000,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
- rc = lpfc_issue_reg_vfi(phba->pport);
- return rc;
+ return lpfc_issue_reg_vfi(phba->pport);
}
/**
@@ -1970,14 +2012,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
* loopback mode in order to perform a diagnostic loopback test.
*/
static int
-lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct diag_mode_set *loopback_mode;
- uint32_t link_flags, timeout;
+ uint32_t link_flags, timeout, link_no;
int i, rc = 0;
/* no data to return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)) {
@@ -1988,12 +2032,39 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
(int)(sizeof(struct fc_bsg_request) +
sizeof(struct diag_mode_set)));
rc = -EINVAL;
- goto job_error;
+ goto job_done;
+ }
+
+ loopback_mode = (struct diag_mode_set *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout * 100;
+
+ if (loopback_mode->physical_link == -1)
+ link_no = phba->sli4_hba.lnk_info.lnk_no;
+ else
+ link_no = loopback_mode->physical_link;
+
+ if (link_flags == DISABLE_LOOP_BACK) {
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
+ link_no);
+ if (!rc) {
+ /* Unset the need disable bit */
+ phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
+ }
+ goto job_done;
+ } else {
+ /* Check if we need to disable the loopback state */
+ if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
+ rc = -EPERM;
+ goto job_done;
+ }
}
rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
- goto job_error;
+ goto job_done;
/* indicate we are in loobpack diagnostic mode */
spin_lock_irq(&phba->hbalock);
@@ -2003,15 +2074,11 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
/* reset port to start frome scratch */
rc = lpfc_selective_reset(phba);
if (rc)
- goto job_error;
+ goto job_done;
/* bring the link to diagnostic mode */
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3129 Bring link to diagnostic state.\n");
- loopback_mode = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
- link_flags = loopback_mode->type;
- timeout = loopback_mode->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
if (rc) {
@@ -2039,13 +2106,54 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3132 Set up loopback mode:x%x\n", link_flags);
- if (link_flags == INTERNAL_LOOP_BACK)
- rc = lpfc_sli4_bsg_set_internal_loopback(phba);
- else if (link_flags == EXTERNAL_LOOP_BACK)
- rc = lpfc_hba_init_link_fc_topology(phba,
- FLAGS_TOPOLOGY_MODE_PT_PT,
- MBX_NOWAIT);
- else {
+ switch (link_flags) {
+ case INTERNAL_LOOP_BACK:
+ if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+ link_no);
+ } else {
+ /* Trunk is configured, but link is not in this trunk */
+ if (phba->sli4_hba.conf_trunk) {
+ rc = -ELNRNG;
+ goto loopback_mode_exit;
+ }
+
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
+ link_no);
+ }
+
+ if (!rc) {
+ /* Set the need disable bit */
+ phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+ }
+
+ break;
+ case EXTERNAL_LOOP_BACK:
+ if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
+ link_no);
+ } else {
+ /* Trunk is configured, but link is not in this trunk */
+ if (phba->sli4_hba.conf_trunk) {
+ rc = -ELNRNG;
+ goto loopback_mode_exit;
+ }
+
+ rc = lpfc_sli4_bsg_set_loopback_mode(phba,
+ LPFC_DIAG_LOOPBACK_TYPE_SERDES,
+ link_no);
+ }
+
+ if (!rc) {
+ /* Set the need disable bit */
+ phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
+ }
+
+ break;
+ default:
rc = -EINVAL;
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3141 Loopback mode:x%x not supported\n",
@@ -2104,12 +2212,13 @@ loopback_mode_exit:
}
lpfc_bsg_diag_mode_exit(phba);
-job_error:
+job_done:
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2121,17 +2230,17 @@ job_error:
* command from the user to proper driver action routines.
*/
static int
-lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
int rc;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2140,7 +2249,7 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
if (phba->sli_rev < LPFC_SLI_REV4)
rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
- else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2)
rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
else
@@ -2157,8 +2266,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
* command from the user to proper driver action routines.
*/
static int
-lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -2166,10 +2277,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
uint32_t timeout;
int rc, i;
- shost = job->shost;
+ shost = fc_bsg_to_shost(job);
if (!shost)
return -ENODEV;
- vport = (struct lpfc_vport *)job->shost->hostdata;
+ vport = shost_priv(shost);
if (!vport)
return -ENODEV;
phba = vport->phba;
@@ -2178,7 +2289,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
if (phba->sli_rev < LPFC_SLI_REV4)
return -ENODEV;
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
@@ -2187,7 +2298,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
phba->link_flag &= ~LS_LOOPBACK_MODE;
spin_unlock_irq(&phba->hbalock);
loopback_mode_end_cmd = (struct diag_mode_set *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
timeout = loopback_mode_end_cmd->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2202,7 +2313,6 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
- rc = -ETIMEDOUT;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3140 Timeout waiting for link to "
"diagnostic mode_end, timeout:%d ms\n",
@@ -2219,10 +2329,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
loopback_mode_end_exit:
/* make return code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2234,46 +2345,41 @@ loopback_mode_end_exit:
* applicaiton.
*/
static int
-lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
LPFC_MBOXQ_t *pmboxq;
struct sli4_link_diag *link_diag_test_cmd;
uint32_t req_len, alloc_len;
- uint32_t timeout;
struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t shdr_status, shdr_add_status;
struct diag_status *diag_status_reply;
- int mbxstatus, rc = 0;
+ int mbxstatus, rc = -ENODEV, rc1 = 0;
- shost = job->shost;
- if (!shost) {
- rc = -ENODEV;
+ shost = fc_bsg_to_shost(job);
+ if (!shost)
goto job_error;
- }
- vport = (struct lpfc_vport *)job->shost->hostdata;
- if (!vport) {
- rc = -ENODEV;
+
+ vport = shost_priv(shost);
+ if (!vport)
goto job_error;
- }
+
phba = vport->phba;
- if (!phba) {
- rc = -ENODEV;
+ if (!phba)
goto job_error;
- }
- if (phba->sli_rev < LPFC_SLI_REV4) {
- rc = -ENODEV;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
goto job_error;
- }
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2) {
- rc = -ENODEV;
+
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+ LPFC_SLI_INTF_IF_TYPE_2)
goto job_error;
- }
if (job->request_len < sizeof(struct fc_bsg_request) +
sizeof(struct sli4_link_diag)) {
@@ -2292,8 +2398,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
link_diag_test_cmd = (struct sli4_link_diag *)
- job->request->rqst_data.h_vendor.vendor_cmd;
- timeout = link_diag_test_cmd->timeout * 100;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
@@ -2315,6 +2420,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
rc = -ENOMEM;
goto link_diag_test_exit;
}
+
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
phba->sli4_hba.lnk_info.lnk_no);
@@ -2343,15 +2449,14 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
diag_status_reply = (struct diag_status *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3012 Received Run link diag test reply "
"below minimum size (%d): reply_len:%d\n",
- (int)(sizeof(struct fc_bsg_request) +
- sizeof(struct diag_status)),
+ (int)(sizeof(*bsg_reply) +
+ sizeof(*diag_status_reply)),
job->reply_len);
rc = -EINVAL;
goto job_error;
@@ -2362,7 +2467,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
diag_status_reply->shdr_add_status = shdr_add_status;
link_diag_test_exit:
- rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+ rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -2371,10 +2476,13 @@ link_diag_test_exit:
job_error:
/* make error code available to userspace */
- job->reply->result = rc;
+ if (rc1 && !rc)
+ rc = rc1;
+ bsg_reply->result = rc;
/* complete the job back to userspace if no error */
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -2402,6 +2510,10 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
mbox, *rpi);
else {
*rpi = lpfc_sli4_alloc_rpi(phba);
+ if (*rpi == LPFC_RPI_ALLOC_ERROR) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EBUSY;
+ }
status = lpfc_reg_rpi(phba, phba->pport->vpi,
phba->pport->fc_myDID,
(uint8_t *)&phba->pport->fc_sparam,
@@ -2415,9 +2527,9 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENOMEM;
}
- dmabuff = (struct lpfc_dmabuf *) mbox->context1;
- mbox->context1 = NULL;
- mbox->context2 = NULL;
+ dmabuff = mbox->ctx_buf;
+ mbox->ctx_buf = NULL;
+ mbox->ctx_ndlp = NULL;
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2484,7 +2596,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
*
* This function obtains the transmit and receive ids required to send
* an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
- * flags are used to the unsolicted response handler is able to process
+ * flags are used to the unsolicited response handler is able to process
* the ct command sent on the same port.
**/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
@@ -2492,14 +2604,14 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
{
struct lpfc_bsg_event *evt;
struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl = NULL;
struct lpfc_sli_ct_request *ctreq = NULL;
int ret_val = 0;
int time_left;
- int iocb_stat = 0;
+ int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
+ u32 status;
*txxri = 0;
*rxxri = 0;
@@ -2543,9 +2655,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
goto err_get_xri_exit;
}
- cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
-
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
@@ -2555,42 +2664,30 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
-
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
-
- cmd->un.xseq64.w5.hcsw.Fctl = LA;
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = rpi;
-
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->bpl_dmabuf = dmabuf;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
+ cmdiocbq->cmd_cmpl = NULL;
+
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
+ FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
- rspiocbq,
- (phba->fc_ratov * 2)
- + LPFC_DRVR_TIMEOUT);
- if (iocb_stat) {
+ rspiocbq, (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+
+ status = get_job_ulpstatus(phba, rspiocbq);
+ if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
ret_val = -EIO;
goto err_get_xri_exit;
}
- *txxri = rsp->ulpContext;
+ *txxri = get_job_ulpcontext(phba, rspiocbq);
evt->waiting = 1;
evt->wait_time_stamp = jiffies;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
- msecs_to_jiffies(1000 *
- ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
if (list_empty(&evt->events_to_see))
ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
else {
@@ -2626,8 +2723,8 @@ err_get_xri_exit:
* lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
* @phba: Pointer to HBA context object
*
- * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
- * retruns the pointer to the buffer.
+ * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
+ * returns the pointer to the buffer.
**/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
@@ -2650,7 +2747,6 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
kfree(dmabuf);
return NULL;
}
- memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
return dmabuf;
}
@@ -2758,9 +2854,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
if (nocopydata) {
bpl->tus.f.bdeFlags = 0;
- pci_dma_sync_single_for_device(phba->pcidev,
- dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
} else {
memset((uint8_t *)dmp->dma.virt, 0, cnt);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -2778,27 +2871,28 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
size -= cnt;
}
- mlist->flag = i;
- return mlist;
+ if (mlist) {
+ mlist->flag = i;
+ return mlist;
+ }
out:
diag_cmd_data_free(phba, mlist);
return NULL;
}
/**
- * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
* @phba: Pointer to HBA context object
* @rxxri: Receive exchange id
* @len: Number of data bytes
*
* This function allocates and posts a data buffer of sufficient size to receive
- * an unsolicted CT command.
+ * an unsolicited CT command.
**/
-static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
- size_t len)
+static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd = NULL;
struct list_head head, *curr, *next;
@@ -2812,6 +2906,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
int iocb_stat;
int i = 0;
+ pring = lpfc_phba_elsring(phba);
+
cmdiocbq = lpfc_sli_get_iocbq(phba);
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
@@ -2823,7 +2919,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
}
}
- if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+ if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
ret_val = -ENOMEM;
goto err_post_rxbufs_exit;
}
@@ -2831,7 +2927,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
/* Queue buffers for the receive exchange */
num_bde = (uint32_t)rxbuffer->flag;
dmp = &rxbuffer->dma;
-
cmd = &cmdiocbq->iocb;
i = 0;
@@ -2861,7 +2956,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
cmd->un.cont64[i].tus.f.bdeSize =
((struct lpfc_dmabufext *)mp[i])->size;
- cmd->ulpBdeCount = ++i;
+ cmd->ulpBdeCount = ++i;
if ((--num_bde > 0) && (i < 2))
continue;
@@ -2899,7 +2994,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
-
cmd = &cmdiocbq->iocb;
i = 0;
}
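The posting loop above packs at most two 64-bit BDEs into each receive IOCB (cmd->ulpBdeCount = ++i, continue while more buffers remain and i < 2), issues the command, then starts a fresh one. A small sketch of that batching, with post_pair() and the types purely illustrative:

/* Pack descriptors into a command until it holds two entries (or the
 * buffers run out), issue it, and start over.
 */
#include <stdio.h>

#define MAX_BDE_PER_CMD 2

static void post_pair(const int *bufs, int count)
{
	printf("issue command with %d buffer(s):", count);
	for (int i = 0; i < count; i++)
		printf(" %d", bufs[i]);
	printf("\n");
}

static void post_rx_buffers(const int *bufs, int num_bde)
{
	int pending[MAX_BDE_PER_CMD];
	int i = 0;

	while (num_bde > 0) {
		pending[i++] = *bufs++;         /* models ulpBdeCount = ++i */
		if (--num_bde > 0 && i < MAX_BDE_PER_CMD)
			continue;               /* room for a second BDE */
		post_pair(pending, i);          /* issue and start over */
		i = 0;
	}
}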
@@ -2938,11 +3032,11 @@ err_post_rxbufs_exit:
* of loopback mode.
**/
static int
-lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
- struct diag_mode_test *diag_mode;
struct lpfc_bsg_event *evt;
struct event_data *evdat;
struct lpfc_sli *psli = &phba->sli;
@@ -2951,7 +3045,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
- IOCB_t *cmd, *rsp = NULL;
+ union lpfc_wqe128 *cmdwqe, *rspwqe;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
@@ -2963,13 +3057,13 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
int time_left;
- int iocb_stat;
+ int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
void *dataout = NULL;
uint32_t total_mem;
/* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -2985,8 +3079,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = -EINVAL;
goto loopback_test_exit;
}
- diag_mode = (struct diag_mode_test *)
- job->request->rqst_data.h_vendor.vendor_cmd;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -3046,7 +3138,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
goto loopback_test_exit;
}
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
@@ -3089,9 +3181,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
goto err_loopback_test_exit;
}
- cmd = &cmdiocbq->iocb;
- if (phba->sli_rev < LPFC_SLI_REV4)
- rsp = &rspiocbq->iocb;
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rspwqe = &rspiocbq->wqe;
+ memset(rspwqe, 0, sizeof(*rspwqe));
+ }
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -3104,8 +3199,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
ctreq->RevisionId.bits.InId = 0;
ctreq->FsType = SLI_CT_ELX_LOOPBACK;
ctreq->FsSubType = 0;
- ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
- ctreq->CommandResponse.bits.Size = size;
+ ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA);
+ ctreq->CommandResponse.bits.Size = cpu_to_be16(size);
segment_offset = ELX_LOOPBACK_HEADER_SZ;
} else
segment_offset = 0;
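The loopback CT header above now stores CmdRsp and Size through cpu_to_be16(), making the on-the-wire 16-bit fields explicitly big-endian regardless of host byte order. A user-space sketch of the same idea, with ct_hdr/build_ct_hdr as illustrative names and htons standing in for cpu_to_be16:

#include <arpa/inet.h>   /* htons */
#include <stdint.h>
#include <string.h>

struct ct_hdr {
	uint8_t  rev;
	uint8_t  fs_type;
	uint8_t  fs_subtype;
	uint16_t cmd;        /* big-endian on the wire */
	uint16_t size;       /* big-endian on the wire */
} __attribute__((packed));

static void build_ct_hdr(struct ct_hdr *h, uint16_t cmd, uint16_t size)
{
	memset(h, 0, sizeof(*h));
	h->fs_type = 0x20;               /* arbitrary example type */
	h->cmd  = htons(cmd);            /* models cpu_to_be16(ELX_LOOPBACK_DATA) */
	h->size = htons(size);           /* models cpu_to_be16(size) */
}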
@@ -3123,38 +3218,35 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
/* Build the XMIT_SEQUENCE iocb */
num_bde = (uint32_t)txbuffer->flag;
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
+ cmdiocbq->num_bdes = num_bde;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ if (phba->cfg_vmid_app_header)
+ cmdiocbq->cmd_flag |= LPFC_IO_VMID;
- cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->cmd_cmpl = NULL;
+ cmdiocbq->bpl_dmabuf = txbmp;
if (phba->sli_rev < LPFC_SLI_REV4) {
- cmd->ulpContext = txxri;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
+ num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+
} else {
- cmd->un.xseq64.bdl.ulpIoTag32 = 0;
- cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
- cmdiocbq->context3 = txbmp;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
+ phba->sli4_hba.rpi_ids[rpi], 0xffff,
+ full_size, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
cmdiocbq->sli4_xritag = NO_XRI;
- cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
+
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
-
- if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOCB_SUCCESS))) {
+ if (iocb_stat != IOCB_SUCCESS ||
+ (phba->sli_rev < LPFC_SLI_REV4 &&
+ (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
@@ -3165,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
evt->waiting = 1;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
- msecs_to_jiffies(1000 *
- ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+ secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
evt->waiting = 0;
if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
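The waits above now build their timeout with secs_to_jiffies() instead of multiplying the second count by 1000 and converting milliseconds; both forms describe the same interval. A quick user-space check of that equivalence, with HZ a hypothetical tick rate standing in for the kernel's:

#include <assert.h>

#define HZ 250UL                         /* hypothetical tick rate */

static unsigned long msecs_to_ticks(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;   /* round up to a full tick */
}

static unsigned long secs_to_ticks(unsigned long s)
{
	return s * HZ;
}

int main(void)
{
	unsigned long timeout_s = 2 * 30 + 10;   /* e.g. fc_ratov * 2 + margin */

	assert(secs_to_ticks(timeout_s) == msecs_to_ticks(timeout_s * 1000));
	return 0;
}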
@@ -3193,11 +3284,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = IOCB_SUCCESS;
/* skip over elx loopback header */
rx_databuf += ELX_LOOPBACK_HEADER_SZ;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
rx_databuf, size);
- job->reply->reply_payload_rcv_len = size;
+ bsg_reply->reply_payload_rcv_len = size;
}
}
@@ -3209,7 +3300,7 @@ err_loopback_test_exit:
lpfc_bsg_event_unref(evt); /* delete */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- if (cmdiocbq != NULL)
+ if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
lpfc_sli_release_iocbq(phba, cmdiocbq);
if (rspiocbq != NULL)
@@ -3227,11 +3318,12 @@ err_loopback_test_exit:
loopback_test_exit:
kfree(dataout);
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
if (rc == IOCB_SUCCESS)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -3240,11 +3332,11 @@ loopback_test_exit:
* @job: GET_DFC_REV fc_bsg_job
**/
static int
-lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
- struct get_mgmt_rev *event_req;
struct get_mgmt_rev_reply *event_reply;
int rc = 0;
@@ -3257,14 +3349,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
goto job_error;
}
- event_req = (struct get_mgmt_rev *)
- job->request->rqst_data.h_vendor.vendor_cmd;
-
event_reply = (struct get_mgmt_rev_reply *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2741 Received GET_DFC_REV reply below "
"minimum size\n");
@@ -3275,9 +3363,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
- job->reply->result = rc;
+ bsg_reply->result = rc;
if (rc == 0)
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
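The handler above rejects the job unless the caller supplied at least the generic bsg reply header plus the vendor-specific reply structure before it touches the reply area; the same guard recurs in several vendor handlers further down. A generic sketch of that check, with struct names that are illustrative rather than the driver's:

#include <errno.h>
#include <stddef.h>

struct generic_reply {                  /* models struct fc_bsg_reply */
	int result;
	unsigned int rcv_len;
	unsigned char vendor_rsp[];
};

struct vendor_rev_reply {               /* models a vendor reply payload */
	unsigned int major;
	unsigned int minor;
};

static int fill_rev_reply(struct generic_reply *reply, size_t reply_len)
{
	struct vendor_rev_reply *rev;

	if (reply_len < sizeof(*reply) + sizeof(*rev))
		return -EINVAL;         /* reply buffer too small */

	rev = (struct vendor_rev_reply *)reply->vendor_rsp;
	rev->major = 1;                 /* example values */
	rev->minor = 2;
	reply->result = 0;
	return 0;
}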
@@ -3289,19 +3378,20 @@ job_error:
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
- * will wake up thread waiting on the wait queue pointed by context1
+ * will wake up thread waiting on the wait queue pointed by dd_data
* of the mailbox.
**/
-void
+static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
+ struct bsg_job *job;
uint32_t size;
unsigned long flags;
uint8_t *pmb, *pmb_buf;
- dd_data = pmboxq->context1;
+ dd_data = pmboxq->ctx_u.dd_data;
/*
* The outgoing buffer is readily referred from the dma buffer,
@@ -3324,8 +3414,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Copy the mailbox data to the job if it is still active */
if (job) {
+ bsg_reply = job->reply;
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
@@ -3339,8 +3430,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Complete the job if the job is still active */
if (job) {
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
}
return;
}
@@ -3371,13 +3463,14 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_RUN_DIAGS:
case MBX_RESTART:
case MBX_SET_MASK:
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2743 Command 0x%x is illegal in on-line "
"state\n",
mb->mbxCommand);
return -EPERM;
}
+ break;
case MBX_WRITE_NV:
case MBX_WRITE_VPARMS:
case MBX_LOAD_SM:
@@ -3412,15 +3505,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -3438,7 +3522,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
}
/**
- * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
* @phba: Pointer to HBA context object.
*
* This is routine clean up and reset BSG handling of multi-buffer mbox
@@ -3463,6 +3547,103 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
}
/**
+ * lpfc_rd_obj_emb0_handle_job - Handles completion for non-embedded
+ * READ_OBJECT_V0 mailbox commands
+ * @phba: pointer to lpfc_hba data struct
+ * @pmb_buf: pointer to mailbox buffer
+ * @sli_cfg_mbx: pointer to SLI_CONFIG mailbox memory region
+ * @job: pointer to bsg_job struct
+ * @bsg_reply: point to bsg_reply struct
+ *
+ * Given a non-embedded READ_OBJECT_V0's HBD_CNT, this routine copies
+ * a READ_OBJECT_V0 mailbox command's read data payload into a bsg_job
+ * structure for passing back to application layer.
+ *
+ * Return codes
+ * 0 - successful
+ * -EINVAL - invalid HBD_CNT
+ * -ENODEV - pointer to bsg_job struct is NULL
+ **/
+static int
+lpfc_rd_obj_emb0_handle_job(struct lpfc_hba *phba, u8 *pmb_buf,
+ struct lpfc_sli_config_mbox *sli_cfg_mbx,
+ struct bsg_job *job,
+ struct fc_bsg_reply *bsg_reply)
+{
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ u32 hbd_cnt;
+ u32 dma_buf_len;
+ u8 i = 0;
+ size_t extra_bytes;
+ off_t skip = 0;
+
+ if (!job) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2496 NULL job\n");
+ return -ENODEV;
+ }
+
+ if (!bsg_reply) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2498 NULL bsg_reply\n");
+ return -ENODEV;
+ }
+
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
+
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+
+ /* Calculate where the read object's read data payload is located based
+ * on HBD count scheme.
+ */
+ if (hbd_cnt >= rd_obj_scheme[0].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[0].max_hbd_cnt) {
+ skip = rd_obj_scheme[0].payload_word_offset * 4;
+ } else if (hbd_cnt >= rd_obj_scheme[1].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[1].max_hbd_cnt) {
+ skip = rd_obj_scheme[1].payload_word_offset * 4;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2497 bad hbd_count 0x%08x\n",
+ hbd_cnt);
+ return -EINVAL;
+ }
+
+ /* Copy SLI_CONFIG command and READ_OBJECT response first */
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, skip);
+
+ /* Copy data from hbds */
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ list) {
+ dma_buf_len = emb0_subsys->hbd[i].buf_len;
+
+ /* Use sg_copy_buffer to specify a skip offset */
+ extra_bytes = sg_copy_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ curr_dmabuf->virt,
+ dma_buf_len, skip, false);
+
+ bsg_reply->reply_payload_rcv_len += extra_bytes;
+
+ skip += extra_bytes;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2499 copied hbd[%d] "
+ "0x%zx bytes\n",
+ i, extra_bytes);
+ i++;
+ }
+
+ return 0;
+}
+
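The helper above picks the word offset of the READ_OBJECT read data from a two-entry scheme table keyed by the HBD count, copies the command/response header up to that offset, then appends each host buffer descriptor's data at a steadily growing skip offset. A user-space model of that placement logic; the scheme entries below are made-up example ranges and offsets, not the driver's, and all names are illustrative:

#include <stdint.h>
#include <string.h>

struct rd_obj_scheme {
	uint32_t min_hbd_cnt, max_hbd_cnt, cmd_size, payload_word_offset;
};

static const struct rd_obj_scheme scheme[2] = {
	{ 1, 4, 256, 64 },               /* example: 1..4 HBDs -> word 64 */
	{ 5, 8, 512, 96 },               /* example: 5..8 HBDs -> word 96 */
};

struct hbd { const void *buf; size_t len; };

/* Returns total bytes written to 'out', or 0 for an invalid hbd_cnt. */
static size_t assemble_reply(uint8_t *out, size_t out_len,
			     const uint8_t *mbox, uint32_t hbd_cnt,
			     const struct hbd *hbds)
{
	size_t skip;

	if (hbd_cnt >= scheme[0].min_hbd_cnt && hbd_cnt <= scheme[0].max_hbd_cnt)
		skip = scheme[0].payload_word_offset * 4;
	else if (hbd_cnt >= scheme[1].min_hbd_cnt && hbd_cnt <= scheme[1].max_hbd_cnt)
		skip = scheme[1].payload_word_offset * 4;
	else
		return 0;                /* bad hbd_count */

	if (skip > out_len)
		return 0;
	memcpy(out, mbox, skip);         /* SLI_CONFIG + READ_OBJECT response */

	for (uint32_t i = 0; i < hbd_cnt; i++) {
		size_t n = hbds[i].len;

		if (skip + n > out_len)
			n = out_len - skip;
		memcpy(out + skip, hbds[i].buf, n);  /* HBD data after header */
		skip += n;
	}
	return skip;
}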
+/**
* lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
@@ -3470,25 +3651,27 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
* This is routine handles BSG job for mailbox commands completions with
* multiple external buffers.
**/
-static struct fc_bsg_job *
+static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply = NULL;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
- uint32_t size;
+ u32 size, opcode;
int rc = 0;
struct lpfc_dmabuf *dmabuf;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
- dd_data = pmboxq->context1;
+ dd_data = pmboxq->ctx_u.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
job = dd_data->set_job;
if (job) {
+ bsg_reply = job->reply;
/* Prevent timeout handling from trying to abort job */
job->dd_data = NULL;
}
@@ -3513,25 +3696,45 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
&pmbx[sizeof(MAILBOX_t)],
sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ if (job) {
+ rc = lpfc_rd_obj_emb0_handle_job(phba, pmb_buf,
+ sli_cfg_mbx,
+ job,
+ bsg_reply);
+ bsg_reply->result = rc;
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
}
/* Complete the job if the job is still active */
if (job) {
size = job->reply_payload.payload_len;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
/* result for successful */
- job->reply->result = 0;
+ bsg_reply->result = 0;
+done:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2937 SLI_CONFIG ext-buffer maibox command "
+ "2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
- phba->mbox_ext_buf_ctx.mboxType, size);
+ phba->mbox_ext_buf_ctx.mboxType,
+ job->reply_payload.payload_len);
lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType,
@@ -3539,7 +3742,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2938 SLI_CONFIG ext-buffer maibox "
+ "2938 SLI_CONFIG ext-buffer mailbox "
"command (x%x/x%x) failure, rc:x%x\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, rc);
@@ -3563,7 +3766,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3572,7 +3776,7 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2939 SLI_CONFIG ext-buffer rd maibox command "
+ "2939 SLI_CONFIG ext-buffer rd mailbox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
@@ -3583,9 +3787,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
-
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3600,7 +3806,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
- struct fc_bsg_job *job;
+ struct bsg_job *job;
+ struct fc_bsg_reply *bsg_reply;
job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
@@ -3609,7 +3816,7 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2940 SLI_CONFIG ext-buffer wr maibox command "
+ "2940 SLI_CONFIG ext-buffer wr mailbox command "
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
@@ -3618,8 +3825,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_bsg_mbox_ext_session_reset(phba);
/* if the job is still active, call job done */
- if (job)
- job->job_done(job);
+ if (job) {
+ bsg_reply = job->reply;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ }
return;
}
@@ -3718,40 +3928,44 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
}
/**
- * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
+ * @job: Pointer to the job object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
- * non-embedded external bufffers.
+ * non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ struct list_head *ext_dmabuf_list;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
- uint32_t ext_buf_cnt, ext_buf_index;
+ u32 ext_buf_cnt, ext_buf_index, hbd_cnt;
struct lpfc_dmabuf *ext_dmabuf = NULL;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
- uint8_t *pmbx;
+ u8 *pmbx, opcode;
int rc, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (nemb_tp == nemb_mse) {
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
- &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ &emb0_subsys->sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2945 Handled SLI_CONFIG(mse) rd, "
@@ -3761,12 +3975,63 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -ERANGE;
goto job_error;
}
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2449 SLI_CONFIG(mse) rd non-embedded "
+ "hbd count = %d\n",
+ hbd_cnt);
+
+ ext_dmabuf_list =
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list;
+
+ /* Allocate hbds */
+ for (i = 0; i < hbd_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ ext_dmabuf_list);
+ }
+
+ /* Fill out the physical memory addresses for the
+ * hbds
+ */
+ i = 0;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ ext_dmabuf_list, list) {
+ emb0_subsys->hbd[i].pa_hi =
+ putPaddrHigh(curr_dmabuf->phys);
+ emb0_subsys->hbd[i].pa_lo =
+ putPaddrLow(curr_dmabuf->phys);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2495 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, "
+ "addrLo:x%x\n", i,
+ emb0_subsys->hbd[i].buf_len,
+ emb0_subsys->hbd[i].pa_hi,
+ emb0_subsys->hbd[i].pa_lo);
+ i++;
+ }
+ break;
+ default:
+ break;
+ }
+
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2941 Handled SLI_CONFIG(mse) rd, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
} else {
/* sanity check on interface type for support */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2) {
rc = -ENODEV;
goto job_error;
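For each allocated HBD page, the loop above records the upper and lower 32 bits of its DMA address in the descriptor via putPaddrHigh()/putPaddrLow(). A small sketch of that split and its round-trip, with hbd_desc an illustrative stand-in for the descriptor layout:

#include <assert.h>
#include <stdint.h>

struct hbd_desc {
	uint32_t buf_len;
	uint32_t pa_lo;
	uint32_t pa_hi;
};

static void hbd_set_addr(struct hbd_desc *d, uint64_t dma_addr, uint32_t len)
{
	d->buf_len = len;
	d->pa_hi = (uint32_t)(dma_addr >> 32);          /* models putPaddrHigh() */
	d->pa_lo = (uint32_t)(dma_addr & 0xffffffffu);  /* models putPaddrLow() */
}

int main(void)
{
	struct hbd_desc d;
	uint64_t addr = 0x0000123456789000ull;

	hbd_set_addr(&d, addr, 4096);
	assert((((uint64_t)d.pa_hi << 32) | d.pa_lo) == addr);
	return 0;
}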
@@ -3861,7 +4126,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -3887,12 +4152,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2947 Issued SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2948 Failed to issue SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
job_error:
@@ -3908,17 +4173,20 @@ job_error:
/**
* lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
- * non-embedded external bufffers.
+ * non-embedded external buffers.
**/
static int
-lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
enum nemb_type nemb_tp,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct dfc_mbox_req *mbox_req;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint32_t ext_buf_cnt;
@@ -3929,7 +4197,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* pointer to the start of mailbox command */
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3950,7 +4218,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"ext_buf_cnt:%d\n", ext_buf_cnt);
} else {
/* sanity check on interface type for support */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
/* nemb_tp == nemb_hbd */
@@ -4030,7 +4298,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4044,20 +4312,21 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2955 Issued SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2956 Failed to issue SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
- /* wait for additoinal external buffers */
+ /* wait for additional external buffers */

- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
@@ -4071,15 +4340,15 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
- * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B
- * with embedded sussystem 0x1 and opcodes with external HBDs.
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
**/
static int
-lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4101,6 +4370,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
switch (opcode) {
case FCOE_OPCODE_READ_FCF:
+ case FCOE_OPCODE_GET_DPORT_RESULTS:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2957 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
@@ -4109,6 +4379,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
nemb_mse, dmabuf);
break;
case FCOE_OPCODE_ADD_FCF:
+ case FCOE_OPCODE_SET_DPORT_MODE:
+ case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2958 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
@@ -4128,6 +4400,9 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
switch (opcode) {
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
+ case COMN_OPCODE_GET_PROFILE_CONFIG:
+ case COMN_OPCODE_SET_FEATURES:
+ case COMN_OPCODE_READ_OBJECT:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
@@ -4199,7 +4474,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/**
- * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
* @phba: Pointer to HBA context object.
*
* This routine is for requesting to abort a pass-through mailbox command with
@@ -4218,14 +4493,15 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
/**
* lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
* @phba: Pointer to HBA context object.
- * @dmabuf: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
*
* This routine extracts the next mailbox read external buffer back to
* user space through BSG.
**/
static int
-lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
struct lpfc_dmabuf *dmabuf;
uint8_t *pbuf;
@@ -4263,7 +4539,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf, index);
pbuf = (uint8_t *)dmabuf->virt;
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pbuf, size);
@@ -4277,8 +4553,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
lpfc_bsg_mbox_ext_session_reset(phba);
}
- job->reply->result = 0;
- job->job_done(job);
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
}
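lpfc_bsg_read_ebuf_get above hands back one external buffer per call, tracked by an index, and resets the whole multi-buffer session once the last buffer has been copied out. A user-space model of that sequential hand-off, with ebuf_session and its fields purely illustrative:

#include <stddef.h>
#include <string.h>

#define MAX_EBUF 8

struct ebuf_session {
	unsigned int num_buf;            /* total buffers in this session */
	unsigned int index;              /* next buffer to hand out */
	void *buf[MAX_EBUF];
	size_t len[MAX_EBUF];
};

static void ebuf_session_reset(struct ebuf_session *s)
{
	memset(s, 0, sizeof(*s));        /* models the idle-state reset */
}

/* Copy the next buffer into 'out'; returns bytes copied, 0 when exhausted. */
static size_t ebuf_get_next(struct ebuf_session *s, void *out, size_t out_len)
{
	size_t n;

	if (s->index >= s->num_buf)
		return 0;                /* nothing left in this session */

	n = s->len[s->index] < out_len ? s->len[s->index] : out_len;
	memcpy(out, s->buf[s->index], n);

	if (++s->index == s->num_buf)    /* last buffer: tear the session down */
		ebuf_session_reset(s);
	return n;
}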
@@ -4286,16 +4563,17 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
/**
* lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
* @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
* @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine sets up the next mailbox read external buffer obtained
* from user space through BSG.
**/
static int
-lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
- struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
@@ -4309,15 +4587,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
- phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
-
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- rc = -ENOMEM;
- goto job_error;
- }
-
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4354,6 +4623,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
@@ -4371,7 +4647,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
/* context fields to callback function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4385,22 +4661,25 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2969 Issued SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2970 Failed to issue SLI_CONFIG ext-buffer "
- "maibox command, rc:x%x\n", rc);
+ "mailbox command, rc:x%x\n", rc);
rc = -EPIPE;
goto job_error;
}
- /* wait for additoinal external buffers */
- job->reply->result = 0;
- job->job_done(job);
+ /* wait for additional external buffers */
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return SLI_CONFIG_HANDLED;
job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
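The write path above now allocates its tracking structure only once the final external buffer has arrived, and the job_error exit frees whichever resources were actually obtained (returning the mailbox to its pool if one was taken). A sketch of that late-allocation, single-error-exit shape, with all names illustrative:

#include <stdlib.h>

struct tracking { int type; };
struct mbox_cmd { int cmd; };

static int issue_final_command(int all_buffers_received)
{
	struct tracking *dd = NULL;
	struct mbox_cmd *mbox = NULL;
	int rc = 0;

	if (!all_buffers_received)
		return 0;                /* nothing to issue yet */

	dd = malloc(sizeof(*dd));        /* allocated only for the last buffer */
	if (!dd) {
		rc = -1;
		goto job_error;
	}
	mbox = malloc(sizeof(*mbox));
	if (!mbox) {
		rc = -1;
		goto job_error;
	}

	/* ... set up and issue the command; in the driver, ownership of both
	 * objects passes to the completion handler on success ...
	 */
	return 0;

job_error:
	free(mbox);                      /* free(NULL) is a no-op */
	free(dd);
	return rc;
}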
@@ -4410,14 +4689,14 @@ job_error:
/**
* lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
* This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
* command with multiple non-embedded external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
int rc;
@@ -4455,21 +4734,22 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
/**
* lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a BSG mailbox object.
- * @dmabuff: Pointer to a DMA buffer descriptor.
+ * @job: Pointer to the job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
*
- * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
* (0x9B) mailbox commands and external buffers.
**/
static int
-lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
+ struct fc_bsg_request *bsg_request = job->request;
struct dfc_mbox_req *mbox_req;
int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4528,20 +4808,22 @@ sli_cfg_ext_error:
/**
* lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
* @phba: Pointer to HBA context object.
- * @mb: Pointer to a mailbox object.
+ * @job: Pointer to the job object.
* @vport: Pointer to a vport object.
*
* Allocate a tracking object, mailbox command memory, get a mailbox
* from the mailbox pool, copy the caller mailbox command.
*
* If offline and the sli is active we need to poll for the command (port is
- * being reset) and com-plete the job, otherwise issue the mailbox command and
+ * being reset) and complete the job, otherwise issue the mailbox command and
* let our completion handler finish the command.
**/
-static uint32_t
-lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+static int
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
struct lpfc_vport *vport)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
/* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4553,7 +4835,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t transmit_length, receive_length, mode;
struct lpfc_mbx_sli4_config *sli4_config;
struct lpfc_mbx_nembed_cmd *nembed_sge;
- struct mbox_header *header;
struct ulp_bde64 *bde;
uint8_t *ext = NULL;
int rc = 0;
@@ -4561,11 +4842,10 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t size;
/* in case no data is transferred */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
- if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
- job->request_payload.payload_len > BSG_MBOX_SIZE) {
+ if (job->request_payload.payload_len > BSG_MBOX_SIZE) {
rc = -ERANGE;
goto job_done;
}
@@ -4574,13 +4854,13 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
* Don't allow mailbox commands to be sent when blocked or when in
* the middle of discovery
*/
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
rc = -EAGAIN;
goto job_done;
}
mbox_req =
- (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+ (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
/* check if requested extended data lengths are valid */
if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4636,6 +4916,19 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
+ /* non-embedded SLI_CONFIG requests already parsed, check others */
+ if (unlikely(job->reply_payload.payload_len > BSG_MBOX_SIZE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2729 Cmd x%x (x%x/x%x) request has "
+ "out-of-range reply payload length x%x\n",
+ pmb->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, pmboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, pmboxq),
+ job->reply_payload.payload_len);
+ rc = -ERANGE;
+ goto job_done;
+ }
+
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
@@ -4653,7 +4946,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
from = pmbx;
ext = from + sizeof(MAILBOX_t);
- pmboxq->context2 = ext;
+ pmboxq->ext_buf = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
pmboxq->out_ext_byte_len =
@@ -4751,8 +5044,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* rebuild the command for sli4 using our
* own buffers like we do for biu diags
*/
- header = (struct mbox_header *)
- &pmb->un.varWords[0];
nembed_sge = (struct lpfc_mbx_nembed_cmd *)
&pmb->un.varWords[0];
receive_length = nembed_sge->sge[0].length;
@@ -4783,7 +5074,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
/* setup context field to pass wait_queue pointer to wake function */
- pmboxq->context1 = dd_data;
+ pmboxq->ctx_u.dd_data = dd_data;
dd_data->type = TYPE_MBOX;
dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
@@ -4794,7 +5085,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
job->dd_data = dd_data;
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) ||
(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
if (rc != MBX_SUCCESS) {
@@ -4804,7 +5095,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* job finished, copy the data */
memcpy(pmbx, pmb, sizeof(*pmb));
- job->reply->reply_payload_rcv_len =
+ bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmbx, size);
@@ -4833,15 +5124,17 @@ job_cont:
* @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
**/
static int
-lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
struct lpfc_hba *phba = vport->phba;
struct dfc_mbox_req *mbox_req;
int rc = 0;
/* mix-and-match backward compatibility */
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4852,7 +5145,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
sizeof(struct fc_bsg_request)),
(int)sizeof(struct dfc_mbox_req));
mbox_req = (struct dfc_mbox_req *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
mbox_req->extMboxTag = 0;
mbox_req->extSeqNum = 0;
}
@@ -4861,294 +5154,534 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
if (rc == 0) {
/* job done */
- job->reply->result = 0;
+ bsg_reply->result = 0;
job->dd_data = NULL;
- job->job_done(job);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
} else if (rc == 1)
/* job submitted, will complete later*/
rc = 0; /* return zero, no error */
else {
/* some error occurred */
- job->reply->result = rc;
+ bsg_reply->result = rc;
job->dd_data = NULL;
}
return rc;
}
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct forced_link_speed_support_reply *forced_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct get_forced_link_speed_support)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0048 Received FORCED_LINK_SPEED request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply = (struct forced_link_speed_support_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "0049 Received FORCED_LINK_SPEED reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED,
+ &phba->hba_flag)
+ ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+ : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+ bsg_reply->result = rc;
+ if (rc == 0)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
* @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
*
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
+ * Check if FW Logging support by the adapter
**/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
{
- struct bsg_job_data *dd_data;
- struct fc_bsg_job *job;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
- dd_data = cmdiocbq->context1;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
+ ras_fwlog = &phba->ras_fwlog;
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
+ if (!ras_fwlog->ras_hwsupport)
+ return -EACCES;
+ else if (!ras_fwlog->ras_enabled)
+ return -EPERM;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_get_ras_config_reply *ras_reply;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6192 FW_LOG request received "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
}
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- /* Copy the job data or set the failing status for the job */
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc)
+ goto ras_job_error;
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
+ ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- job->reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
+ /* Current logging state */
+ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state == ACTIVE)
+ ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+ else
+ ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+ ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_set_ras_config_req *ras_req;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint8_t action = 0, log_level = 0;
+ int rc = 0, action_status = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_set_ras_config_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6182 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc)
+ goto ras_job_error;
+
+ ras_req = (struct lpfc_bsg_set_ras_config_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ action = ras_req->action;
+ log_level = ras_req->log_level;
+
+ if (action == LPFC_RASACTION_STOP_LOGGING) {
+ /* Check if already disabled */
+ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state != ACTIVE) {
+ spin_unlock_irq(&phba->ras_fwlog_lock);
+ rc = -ESRCH;
+ goto ras_job_error;
+ }
+ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ /* Disable logging */
+ lpfc_ras_stop_fwlog(phba);
+ } else {
+ /*action = LPFC_RASACTION_START_LOGGING*/
+
+ /* Even though FW-logging is active re-initialize
+ * FW-logging with new log-level. Return status
+ * "Logging already Running" to caller.
+ **/
+ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state != INACTIVE)
+ action_status = -EINPROGRESS;
+ spin_unlock_irq(&phba->ras_fwlog_lock);
+
+ /* Enable logging */
+ rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ if (rc) {
+ rc = -EINVAL;
+ goto ras_job_error;
}
+ /* Check if FW-logging is re-initialized */
+ if (action_status == -EINPROGRESS)
+ rc = action_status;
}
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
+ /* complete the job back to userspace */
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
- /* Complete the job if active */
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get Offset/Wrap count of the log message written
+ * in host memory
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_bsg_get_ras_lwpd *ras_reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ u32 *lwpd_ptr = NULL;
+ int rc = 0;
- if (job) {
- job->reply->result = rc;
- job->job_done(job);
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc)
+ goto ras_job_error;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6183 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
}
- return;
+ ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (!ras_fwlog->lwpd.virt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6193 Restart FW Logging\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Get lwpd offset */
+ lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
+ ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
+
+ /* Get wrap count */
+ ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return rc;
}
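The LWPD area read above holds two 32-bit big-endian words: the current write offset followed by the wrap count, each converted with be32_to_cpu() before being reported. A user-space sketch of that parse, with ntohl standing in for be32_to_cpu and lwpd_parse an illustrative name:

#include <arpa/inet.h>   /* ntohl */
#include <stdint.h>
#include <string.h>

struct lwpd {
	uint32_t offset;
	uint32_t wrap_count;
};

static void lwpd_parse(const void *lwpd_virt, struct lwpd *out)
{
	uint32_t words[2];

	memcpy(words, lwpd_virt, sizeof(words));
	out->offset     = ntohl(words[0]);   /* models be32_to_cpu(*lwpd_ptr) */
	out->wrap_count = ntohl(words[1]);   /* models be32_to_cpu(*(++lwpd_ptr)) */
}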
/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * lpfc_bsg_get_ras_fwlog: Read FW log
* @job: fc_bsg_job to handle
*
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
+ * Copy the FW log into the passed buffer.
**/
static int
-lpfc_menlo_cmd(struct fc_bsg_job *job)
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_bsg_get_fwlog_req *ras_req;
+ u32 rd_offset, rd_index, offset;
+ void *src, *fwlog_buff;
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf, *next;
int rc = 0;
- struct menlo_command *menlo_cmd;
- struct menlo_response *menlo_resp;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
- /* in case no data is returned return just the return code */
- job->reply->reply_payload_rcv_len = 0;
+ ras_fwlog = &phba->ras_fwlog;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc)
+ goto ras_job_error;
+
+ /* Logging to be stopped before reading */
+ spin_lock_irq(&phba->ras_fwlog_lock);
+ if (ras_fwlog->state == ACTIVE) {
+ spin_unlock_irq(&phba->ras_fwlog_lock);
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+ spin_unlock_irq(&phba->ras_fwlog_lock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
+ sizeof(struct lpfc_bsg_get_fwlog_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6184 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
}
- if (job->reply_len <
- sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
+ ras_req = (struct lpfc_bsg_get_fwlog_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ rd_offset = ras_req->read_offset;
+
+ /* Allocate memory to read fw log*/
+ fwlog_buff = vmalloc(ras_req->read_size);
+ if (!fwlog_buff) {
+ rc = -ENOMEM;
+ goto ras_job_error;
}
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
+ rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+ offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list, list) {
+
+ if (dmabuf->buffer_tag < rd_index)
+ continue;
+
+ src = dmabuf->virt + offset;
+ memcpy(fwlog_buff, src, ras_req->read_size);
+ break;
}
- menlo_cmd = (struct menlo_command *)
- job->request->rqst_data.h_vendor.vendor_cmd;
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ fwlog_buff, ras_req->read_size);
- menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
+ vfree(fwlog_buff);
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
+ras_job_error:
+ bsg_reply->result = rc;
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
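The firmware-log read above turns the caller's byte offset into a buffer index and an offset within that buffer by dividing and modding with the fixed per-entry size, then copies from the matching chunk. A sketch of that arithmetic; CHUNK_SIZE and the names are illustrative, not the driver's constants, and the length clamp is an addition for safety in the sketch:

#include <stddef.h>
#include <string.h>

#define CHUNK_SIZE 65536u                /* stand-in for the per-entry size */

struct log_chunk { unsigned int tag; const unsigned char *data; };

static size_t fwlog_read(const struct log_chunk *chunks, size_t nchunks,
			 size_t rd_offset, void *out, size_t read_size)
{
	size_t rd_index = rd_offset / CHUNK_SIZE;   /* which chunk */
	size_t offset   = rd_offset % CHUNK_SIZE;   /* where inside it */

	for (size_t i = 0; i < nchunks; i++) {
		if (chunks[i].tag < rd_index)
			continue;                   /* skip earlier chunks */
		if (read_size > CHUNK_SIZE - offset)
			read_size = CHUNK_SIZE - offset; /* stay in the chunk */
		memcpy(out, chunks[i].data + offset, read_size);
		return read_size;
	}
	return 0;
}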
+
+static int
+lpfc_get_trunk_info(struct bsg_job *job)
+{
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_trunk_info *event_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2744 Received GET TRUNK _INFO request below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_reply = (struct lpfc_trunk_info *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
+ "2728 Received GET TRUNK _INFO reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+ if (event_reply == NULL) {
+ rc = -EINVAL;
+ goto job_error;
}
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
+ bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
+ (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
+ (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
+ (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
+ (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
+ (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
+ bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
+ bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
+ bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
+
+ bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
+ bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
+
+ event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
+ event_reply->logical_speed =
+ phba->sli4_hba.link_state.logical_speed / 1000;
+job_error:
+ bsg_reply->result = rc;
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return rc;
+
+}
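
Editor's aside (not part of the patch): a minimal sketch of the completion convention the vendor handlers above follow, where bsg_reply->result carries the status and bsg_job_done() is called only on success. bsg_job_done(), struct bsg_job, and struct fc_bsg_reply are the real bsg-lib/FC transport types; the handler body itself is a hypothetical placeholder.

	#include <linux/bsg-lib.h>
	#include <scsi/scsi_bsg_fc.h>

	/* Hypothetical handler skeleton showing the result/bsg_job_done() pattern. */
	static int example_vendor_handler(struct bsg_job *job)
	{
		struct fc_bsg_reply *bsg_reply = job->reply;
		int rc = 0;

		/* ... vendor-specific work goes here, setting rc on failure ... */

		bsg_reply->result = rc;
		if (!rc)
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
		return rc;
	}
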
+
+static int
+lpfc_get_cgnbuf_info(struct bsg_job *job)
+{
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct get_cgnbuf_info_req *cgnbuf_req;
+ struct lpfc_cgn_info *cp;
+ uint8_t *cgn_buff;
+ size_t size, cinfosz;
+ int rc = 0;
+
+ if (job->request_len < sizeof(struct fc_bsg_request) +
+ sizeof(struct get_cgnbuf_info_req)) {
rc = -ENOMEM;
- goto free_dd;
+ goto job_exit;
}
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
+ if (!phba->sli4_hba.pc_sli4_params.cmf) {
+ rc = -ENOENT;
+ goto job_exit;
}
- INIT_LIST_HEAD(&bmp->list);
+ if (!phba->cgn_i || !phba->cgn_i->virt) {
+ rc = -ENOENT;
+ goto job_exit;
+ }
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
+ cp = phba->cgn_i->virt;
+ if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
+ rc = -EPERM;
+ goto job_exit;
}
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
+ cgnbuf_req = (struct get_cgnbuf_info_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+ /* For reset or size == 0 */
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
+ lpfc_init_congestion_stat(phba);
+ goto job_exit;
}
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
+ /* We don't want to include the CRC at the end */
+ cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
+
+ size = cgnbuf_req->read_size;
+ if (!size)
+ goto job_exit;
+
+ if (size < cinfosz) {
+ /* Just copy back what we can */
+ cinfosz = size;
+ rc = -E2BIG;
}
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
+ /* Allocate memory to read congestion info */
+ cgn_buff = vmalloc(cinfosz);
+ if (!cgn_buff) {
+ rc = -ENOMEM;
+ goto job_exit;
}
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
+ memcpy(cgn_buff, cp, cinfosz);
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ cgn_buff, cinfosz);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
+ vfree(cgn_buff);
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- job->reply->result = rc;
- job->dd_data = NULL;
+job_exit:
+ bsg_reply->result = rc;
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "2724 GET CGNBUF error: %d\n", rc);
return rc;
}
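
Editor's aside (not part of the patch): a minimal sketch of the clamp-and-flag copy pattern used above, where an undersized user buffer still receives as much of the congestion block as fits and -E2BIG reports the truncation; the helper and its names are hypothetical.

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Hypothetical helper: copy what fits, report truncation with -E2BIG. */
	static int example_copy_clamped(void *dst, size_t dst_size,
					const void *src, size_t src_size,
					size_t *copied)
	{
		int rc = 0;

		if (dst_size < src_size) {
			src_size = dst_size;	/* just copy back what we can */
			rc = -E2BIG;
		}
		memcpy(dst, src, src_size);
		*copied = src_size;
		return rc;
	}
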
@@ -5157,9 +5690,11 @@ no_dd_data:
* @job: fc_bsg_job to handle
**/
static int
-lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+lpfc_bsg_hst_vendor(struct bsg_job *job)
{
- int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
int rc;
switch (command) {
@@ -5190,15 +5725,32 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
+ case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+ rc = lpfc_forced_link_speed(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+ rc = lpfc_bsg_get_ras_lwpd(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+ rc = lpfc_bsg_get_ras_fwlog(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+ rc = lpfc_bsg_get_ras_config(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+ rc = lpfc_bsg_set_ras_config(job);
+ break;
+ case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
+ rc = lpfc_get_trunk_info(job);
+ break;
+ case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
+ rc = lpfc_get_cgnbuf_info(job);
break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
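
Editor's aside (not part of the patch): a minimal userspace-side sketch of the request header this dispatcher switches on, assuming the UAPI definitions from scsi/scsi_bsg_fc.h. The helper name is hypothetical, the LPFC_BSG_VENDOR_* value would come from lpfc_bsg.h, and actual submission through the bsg character device (sg_io_v4) is omitted.

	#include <stdint.h>
	#include <string.h>
	#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

	/*
	 * Hypothetical helper: req must point to a buffer large enough for the
	 * fc_bsg_request header plus the trailing vendor command words.
	 */
	static void example_fill_vendor_request(struct fc_bsg_request *req,
						uint32_t lpfc_vendor_cmd)
	{
		memset(req, 0, sizeof(*req));
		req->msgcode = FC_BSG_HST_VENDOR;
		/* word 0 of the vendor payload selects the LPFC vendor command */
		req->rqst_data.h_vendor.vendor_cmd[0] = lpfc_vendor_cmd;
	}
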
@@ -5207,15 +5759,17 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
**/
int
-lpfc_bsg_request(struct fc_bsg_job *job)
+lpfc_bsg_request(struct bsg_job *job)
{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t msgcode;
int rc;
- msgcode = job->request->msgcode;
+ msgcode = bsg_request->msgcode;
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
@@ -5228,9 +5782,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
break;
default:
rc = -EINVAL;
- job->reply->reply_payload_rcv_len = 0;
+ bsg_reply->reply_payload_rcv_len = 0;
/* make error code available to userspace */
- job->reply->result = rc;
+ bsg_reply->result = rc;
break;
}
@@ -5239,24 +5793,28 @@ lpfc_bsg_request(struct fc_bsg_job *job)
/**
* lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function which will handle passing the error back to userspace
**/
int
-lpfc_bsg_timeout(struct fc_bsg_job *job)
+lpfc_bsg_timeout(struct bsg_job *job)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct bsg_job_data *dd_data;
unsigned long flags;
int rc = 0;
LIST_HEAD(completions);
struct lpfc_iocbq *check_iocb, *next_iocb;
+ pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return -EIO;
+
/* if job's driver data is NULL, the command completed or is in the
* process of completing. In this case, return status to request
* so the timeout is retried. This avoids double completion issues
@@ -5282,9 +5840,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* remove it from the txq queue and call cancel iocbs.
* Otherwise, call abort iotag
*/
-
cmdiocb = dd_data->context_un.iocb.cmdiocbq;
- spin_lock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* make sure the I/O abort window is still open */
+ if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return -EAGAIN;
+ }
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
list) {
if (check_iocb == cmdiocb) {
@@ -5293,9 +5857,8 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
}
}
if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irq(&phba->hbalock);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (!list_empty(&completions)) {
lpfc_sli_cancel_iocbs(phba, &completions,
IOSTAT_LOCAL_REJECT,
@@ -5314,31 +5877,6 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
-
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
- spin_unlock_irq(&phba->hbalock);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;