Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
 -rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 1470
 1 file changed, 727 insertions(+), 743 deletions(-)
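Most of the patch below converts the BSG pass-through paths from direct IOCB_t field access (rsp->ulpStatus, rsp->un.ulpWord[4], bdl.bdeSize, context1/context2/context3) to the SLI-revision-agnostic job accessors (get_job_ulpstatus(), get_job_word4(), get_job_data_placed()) and the lpfc_sli_prep_gen_req()/lpfc_sli_prep_xmit_seq64() request builders, alongside removal of the Menlo pass-through and new non-embedded READ_OBJECT handling. The sketch that follows is not part of the patch: it is a minimal illustration of the completion-handler side of that accessor pattern, assumed to sit inside lpfc_bsg.c so the driver's own headers and types apply, with illustrative function and local names.

        /* Illustrative sketch only: shows the accessor-based status/length
         * handling the patch introduces in the BSG completion handlers.
         */
        static void example_bsg_cmpl(struct lpfc_hba *phba,
                                     struct lpfc_iocbq *cmdiocbq,
                                     struct lpfc_iocbq *rspiocbq)
        {
                /* dd_data now lives in context_un.dd_data instead of context1 */
                struct bsg_job_data *dd_data = cmdiocbq->context_un.dd_data;
                u32 ulp_status = get_job_ulpstatus(phba, rspiocbq);
                u32 ulp_word4 = get_job_word4(phba, rspiocbq);
                u32 total_data_placed = get_job_data_placed(phba, rspiocbq);
                int rc = 0;

                if (ulp_status == IOSTAT_LOCAL_REJECT &&
                    (ulp_word4 & IOERR_PARAM_MASK) == IOERR_SEQUENCE_TIMEOUT)
                        rc = -ETIMEDOUT;
                else if (ulp_status)
                        rc = -EIO;

                /* On success, total_data_placed replaces the old
                 * rsp->un.genreq64.bdl.bdeSize when sizing the reply copy.
                 */
                (void)total_data_placed;
                (void)dd_data;
                (void)rc;
        }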
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 2dc564e59430..d61d979f9b77 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -88,17 +88,9 @@ struct lpfc_bsg_mbox { uint32_t outExtWLen; /* from app */ }; -#define MENLO_DID 0x0000FC0E - -struct lpfc_bsg_menlo { - struct lpfc_iocbq *cmdiocbq; - struct lpfc_dmabuf *rmp; -}; - #define TYPE_EVT 1 #define TYPE_IOCB 2 #define TYPE_MBOX 3 -#define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; struct bsg_job *set_job; /* job waiting for this iocb to finish */ @@ -106,7 +98,6 @@ struct bsg_job_data { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; struct lpfc_bsg_mbox mbox; - struct lpfc_bsg_menlo menlo; } context_un; }; @@ -129,6 +120,16 @@ enum ELX_LOOPBACK_CMD { #define ELX_LOOPBACK_HEADER_SZ \ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) +/* For non-embedded read object command */ +#define READ_OBJ_EMB0_SCHEME_0 {1, 10, 256, 128} +#define READ_OBJ_EMB0_SCHEME_1 {11, LPFC_EMB0_MAX_RD_OBJ_HBD_CNT, 512, 192} +static const struct lpfc_read_object_cmd_scheme { + u32 min_hbd_cnt; + u32 max_hbd_cnt; + u32 cmd_size; + u32 payload_word_offset; +} rd_obj_scheme[2] = {READ_OBJ_EMB0_SCHEME_0, READ_OBJ_EMB0_SCHEME_1}; + struct lpfc_dmabufext { struct lpfc_dmabuf dma; uint32_t size; @@ -143,8 +144,8 @@ lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) if (mlist) { list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) { - lpfc_mbuf_free(phba, mlast->virt, mlast->phys); list_del(&mlast->list); + lpfc_mbuf_free(phba, mlast->virt, mlast->phys); kfree(mlast); } lpfc_mbuf_free(phba, mlist->virt, mlist->phys); @@ -303,15 +304,14 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; - IOCB_t *rsp; struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_nodelist *ndlp; struct lpfc_bsg_iocb *iocb; unsigned long flags; - unsigned int rsp_size; int rc = 0; + u32 ulp_status, ulp_word4, total_data_placed; - dd_data = cmdiocbq->context1; + dd_data = cmdiocbq->context_un.dd_data; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); @@ -325,22 +325,24 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, /* Close the timeout handler abort window */ spin_lock_irqsave(&phba->hbalock, flags); - cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; spin_unlock_irqrestore(&phba->hbalock, flags); iocb = &dd_data->context_un.iocb; - ndlp = iocb->ndlp; + ndlp = iocb->cmdiocbq->ndlp; rmp = iocb->rmp; - cmp = cmdiocbq->context2; - bmp = cmdiocbq->context3; - rsp = &rspiocbq->iocb; + cmp = cmdiocbq->cmd_dmabuf; + bmp = cmdiocbq->bpl_dmabuf; + ulp_status = get_job_ulpstatus(phba, rspiocbq); + ulp_word4 = get_job_word4(phba, rspiocbq); + total_data_placed = get_job_data_placed(phba, rspiocbq); /* Copy the completed data or set the error status */ if (job) { - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch 
(rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + if (ulp_status) { + if (ulp_status == IOSTAT_LOCAL_REJECT) { + switch (ulp_word4 & IOERR_PARAM_MASK) { case IOERR_SEQUENCE_TIMEOUT: rc = -ETIMEDOUT; break; @@ -355,10 +357,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, rc = -EACCES; } } else { - rsp_size = rsp->un.genreq64.bdl.bdeSize; bsg_reply->reply_payload_rcv_len = lpfc_bsg_copy_data(rmp, &job->reply_payload, - rsp_size, 0); + total_data_placed, 0); } } @@ -366,8 +367,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, lpfc_free_bsg_buffers(phba, rmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); - lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); + lpfc_sli_release_iocbq(phba, cmdiocbq); kfree(dd_data); /* Complete the job if the job is still active */ @@ -388,26 +389,32 @@ static int lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) { struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); - struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data; + struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = rdata->pnode; struct fc_bsg_reply *bsg_reply = job->reply; struct ulp_bde64 *bpl = NULL; - uint32_t timeout; struct lpfc_iocbq *cmdiocbq = NULL; - IOCB_t *cmd; struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; - int request_nseg; - int reply_nseg; + int request_nseg, reply_nseg; + u32 num_entry; struct bsg_job_data *dd_data; unsigned long flags; uint32_t creg_val; int rc = 0; int iocb_stat; + u16 ulp_context; /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; + if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) || + test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) || + test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_RNID_SND, &ndlp->nlp_flag)) + return -ENODEV; + /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { @@ -417,24 +424,12 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) goto no_dd_data; } - if (!lpfc_nlp_get(ndlp)) { - rc = -ENODEV; - goto no_ndlp; - } - - if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { - rc = -ENODEV; - goto free_ndlp; - } - cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { rc = -ENOMEM; - goto free_ndlp; + goto free_dd; } - cmd = &cmdiocbq->iocb; - bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (!bmp) { rc = -ENOMEM; @@ -468,39 +463,28 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) goto free_cmp; } - cmd->un.genreq64.bdl.ulpIoTag32 = 0; - cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); - cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); - cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - cmd->un.genreq64.bdl.bdeSize = - (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); - cmd->ulpCommand = CMD_GEN_REQUEST64_CR; - cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); - cmd->un.genreq64.w5.hcsw.Dfctl = 0; - cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; - cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; - cmd->ulpBdeCount = 1; - cmd->ulpLe = 1; - cmd->ulpClass = CLASS3; - cmd->ulpContext = ndlp->nlp_rpi; + num_entry = request_nseg + reply_nseg; + if (phba->sli_rev == LPFC_SLI_REV4) - cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; - cmd->ulpOwner = OWN_CHIP; + ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + else + ulp_context = ndlp->nlp_rpi; + + lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry, + phba->fc_ratov * 2); + + cmdiocbq->num_bdes = num_entry; cmdiocbq->vport = phba->pport; - 
cmdiocbq->context3 = bmp; - cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; - timeout = phba->fc_ratov * 2; - cmd->ulpTimeout = timeout; - - cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; - cmdiocbq->context1 = dd_data; - cmdiocbq->context2 = cmp; - cmdiocbq->context3 = bmp; - cmdiocbq->context_un.ndlp = ndlp; + cmdiocbq->cmd_dmabuf = cmp; + cmdiocbq->bpl_dmabuf = bmp; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + + cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; + cmdiocbq->context_un.dd_data = dd_data; + dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; - dd_data->context_un.iocb.ndlp = ndlp; dd_data->context_un.iocb.rmp = rmp; job->dd_data = dd_data; @@ -514,14 +498,19 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) readl(phba->HCregaddr); /* flush */ } - iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); + cmdiocbq->ndlp = lpfc_nlp_get(ndlp); + if (!cmdiocbq->ndlp) { + rc = -ENODEV; + goto free_rmp; + } + iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); if (iocb_stat == IOCB_SUCCESS) { spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O had not been completed yet */ - if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { /* open up abort window to timeout handler */ - cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; } spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ @@ -532,7 +521,7 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) } /* iocb failed so cleanup */ - job->dd_data = NULL; + lpfc_nlp_put(ndlp); free_rmp: lpfc_free_bsg_buffers(phba, rmp); @@ -544,9 +533,7 @@ free_bmp: kfree(bmp); free_cmdiocbq: lpfc_sli_release_iocbq(phba, cmdiocbq); -free_ndlp: - lpfc_nlp_put(ndlp); -no_ndlp: +free_dd: kfree(dd_data); no_dd_data: /* make error code available to userspace */ @@ -580,7 +567,6 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; - IOCB_t *rsp; struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; struct fc_bsg_ctels_reply *els_reply; @@ -588,10 +574,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, unsigned long flags; unsigned int rsp_size; int rc = 0; + u32 ulp_status, ulp_word4, total_data_placed; - dd_data = cmdiocbq->context1; + dd_data = cmdiocbq->context_un.dd_data; ndlp = dd_data->context_un.iocb.ndlp; - cmdiocbq->context1 = ndlp; + cmdiocbq->ndlp = ndlp; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); @@ -605,11 +592,13 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, /* Close the timeout handler abort window */ spin_lock_irqsave(&phba->hbalock, flags); - cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; spin_unlock_irqrestore(&phba->hbalock, flags); - rsp = &rspiocbq->iocb; - pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; + ulp_status = get_job_ulpstatus(phba, rspiocbq); + ulp_word4 = get_job_word4(phba, rspiocbq); + total_data_placed = get_job_data_placed(phba, rspiocbq); + pcmd = cmdiocbq->cmd_dmabuf; prsp = (struct lpfc_dmabuf *)pcmd->list.next; /* Copy the completed job data or determine the job status if job is @@ -617,31 +606,36 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, */ if (job) { - if (rsp->ulpStatus == IOSTAT_SUCCESS) { - rsp_size = rsp->un.elsreq64.bdl.bdeSize; + if (ulp_status == IOSTAT_SUCCESS) { + rsp_size = total_data_placed; bsg_reply->reply_payload_rcv_len = 
sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, prsp->virt, rsp_size); - } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { + } else if (ulp_status == IOSTAT_LS_RJT) { bsg_reply->reply_payload_rcv_len = sizeof(struct fc_bsg_ctels_reply); /* LS_RJT data returned in word 4 */ - rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; + rjt_data = (uint8_t *)&ulp_word4; els_reply = &bsg_reply->reply_data.ctels_reply; els_reply->status = FC_CTELS_STATUS_REJECT; els_reply->rjt_data.action = rjt_data[3]; els_reply->rjt_data.reason_code = rjt_data[2]; els_reply->rjt_data.reason_explanation = rjt_data[1]; els_reply->rjt_data.vendor_unique = rjt_data[0]; + } else if (ulp_status == IOSTAT_LOCAL_REJECT && + (ulp_word4 & IOERR_PARAM_MASK) == + IOERR_SEQUENCE_TIMEOUT) { + rc = -ETIMEDOUT; } else { rc = -EIO; } } - lpfc_nlp_put(ndlp); lpfc_els_free_iocb(phba, cmdiocbq); + + lpfc_nlp_put(ndlp); kfree(dd_data); /* Complete the job if the job is still active */ @@ -710,7 +704,6 @@ lpfc_bsg_rport_els(struct bsg_job *job) * we won't be dma into memory that is no longer allocated to for the * request. */ - cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, elscmd); if (!cmdiocbq) { @@ -718,23 +711,23 @@ lpfc_bsg_rport_els(struct bsg_job *job) goto release_ndlp; } - rpi = ndlp->nlp_rpi; - /* Transfer the request payload to allocated command dma buffer */ - sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, - ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt, + cmdiocbq->cmd_dmabuf->virt, cmdsize); + rpi = ndlp->nlp_rpi; + if (phba->sli_rev == LPFC_SLI_REV4) - cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi]; + bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com, + phba->sli4_hba.rpi_ids[rpi]); else cmdiocbq->iocb.ulpContext = rpi; - cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; - cmdiocbq->context1 = dd_data; - cmdiocbq->context_un.ndlp = ndlp; - cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + cmdiocbq->context_un.dd_data = dd_data; + cmdiocbq->ndlp = ndlp; + cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; @@ -753,13 +746,12 @@ lpfc_bsg_rport_els(struct bsg_job *job) } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - if (rc == IOCB_SUCCESS) { spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O had not been completed/released */ - if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) { /* open up abort window to timeout handler */ - cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; } spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ @@ -769,11 +761,9 @@ lpfc_bsg_rport_els(struct bsg_job *job) rc = -EIO; } - /* iocb failed so cleanup */ - job->dd_data = NULL; + /* I/O issue failed. Cleanup resources. */ linkdown_err: - cmdiocbq->context1 = ndlp; lpfc_els_free_iocb(phba, cmdiocbq); release_ndlp: @@ -902,11 +892,8 @@ diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) return 0; } -/** +/* * lpfc_bsg_ct_unsol_event - process an unsolicited CT command - * @phba: - * @pring: - * @piocbq: * * This function is called when an unsolicited CT command is received. It * forwards the event to any processes registered to receive CT events. 
@@ -916,51 +903,33 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocbq) { uint32_t evt_req_id = 0; - uint32_t cmd; + u16 cmd; struct lpfc_dmabuf *dmabuf = NULL; struct lpfc_bsg_event *evt; struct event_data *evt_dat = NULL; struct lpfc_iocbq *iocbq; + IOCB_t *iocb = NULL; size_t offset = 0; struct list_head head; struct ulp_bde64 *bde; dma_addr_t dma_addr; int i; - struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; - struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; - struct lpfc_hbq_entry *hbqe; + struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf; + struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf; struct lpfc_sli_ct_request *ct_req; struct bsg_job *job = NULL; struct fc_bsg_reply *bsg_reply; struct bsg_job_data *dd_data = NULL; unsigned long flags; int size = 0; + u32 bde_count = 0; INIT_LIST_HEAD(&head); list_add_tail(&head, &piocbq->list); - if (piocbq->iocb.ulpBdeCount == 0 || - piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) - goto error_ct_unsol_exit; - - if (phba->link_state == LPFC_HBA_ERROR || - (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) - goto error_ct_unsol_exit; - - if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) - dmabuf = bdeBuf1; - else { - dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh, - piocbq->iocb.un.cont64[0].addrLow); - dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); - } - if (dmabuf == NULL) - goto error_ct_unsol_exit; - ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; + ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; evt_req_id = ct_req->FsType; - cmd = ct_req->CommandResponse.bits.CmdRsp; - if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) - lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); + cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); spin_lock_irqsave(&phba->ct_ev_lock, flags); list_for_each_entry(evt, &phba->ct_ev_waiters, node) { @@ -983,12 +952,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { /* take accumulated byte count from the last iocbq */ iocbq = list_entry(head.prev, typeof(*iocbq), list); - evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; + if (phba->sli_rev == LPFC_SLI_REV4) + evt_dat->len = iocbq->wcqe_cmpl.total_data_placed; + else + evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; } else { list_for_each_entry(iocbq, &head, list) { - for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) + iocb = &iocbq->iocb; + for (i = 0; i < iocb->ulpBdeCount; + i++) evt_dat->len += - iocbq->iocb.un.cont64[i].tus.f.bdeSize; + iocb->un.cont64[i].tus.f.bdeSize; } } @@ -1008,22 +982,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, list_for_each_entry(iocbq, &head, list) { size = 0; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { - bdeBuf1 = iocbq->context2; - bdeBuf2 = iocbq->context3; + bdeBuf1 = iocbq->cmd_dmabuf; + bdeBuf2 = iocbq->bpl_dmabuf; } - for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { + if (phba->sli_rev == LPFC_SLI_REV4) + bde_count = iocbq->wcqe_cmpl.word3; + else + bde_count = iocbq->iocb.ulpBdeCount; + for (i = 0; i < bde_count; i++) { if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (i == 0) { - hbqe = (struct lpfc_hbq_entry *) - &iocbq->iocb.un.ulpWord[0]; - size = hbqe->bde.tus.f.bdeSize; + size = iocbq->wqe.gen_req.bde.tus.f.bdeSize; dmabuf = bdeBuf1; } else if (i == 1) { - hbqe = (struct lpfc_hbq_entry *) - &iocbq->iocb.unsli3. 
- sli3Words[4]; - size = hbqe->bde.tus.f.bdeSize; + size = iocbq->unsol_rcv_len; dmabuf = bdeBuf2; } if ((offset + size) > evt_dat->len) @@ -1040,7 +1013,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (!dmabuf) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2616 No dmabuf " - "found for iocbq 0x%p\n", + "found for iocbq x%px\n", iocbq); kfree(evt_dat->data); kfree(evt_dat); @@ -1077,17 +1050,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_in_buf_free(phba, dmabuf); } else { - lpfc_post_buffer(phba, - pring, - 1); + lpfc_sli3_post_buffer(phba, + pring, + 1); } break; default: if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) - lpfc_post_buffer(phba, - pring, - 1); + lpfc_sli3_post_buffer(phba, + pring, + 1); break; } } @@ -1110,14 +1083,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->ct_ctx[ evt_dat->immed_dat].SID); phba->ct_ctx[evt_dat->immed_dat].rxid = - piocbq->iocb.ulpContext; + get_job_ulpcontext(phba, piocbq); phba->ct_ctx[evt_dat->immed_dat].oxid = - piocbq->iocb.unsli3.rcvsli3.ox_id; + get_job_rcvoxid(phba, piocbq); phba->ct_ctx[evt_dat->immed_dat].SID = - piocbq->iocb.un.rcvels.remoteID; + bf_get(wqe_els_did, + &piocbq->wqe.xmit_els_rsp.wqe_dest); phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID; } else - evt_dat->immed_dat = piocbq->iocb.ulpContext; + evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq); evt_dat->type = FC_REG_CT_EVENT; list_add(&evt_dat->node, &evt->events_to_see); @@ -1276,9 +1250,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job) return 0; /* call job done later */ job_error: - if (dd_data != NULL) - kfree(dd_data); - + kfree(dd_data); job->dd_data = NULL; return rc; } @@ -1402,13 +1374,13 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct bsg_job *job; struct fc_bsg_reply *bsg_reply; - IOCB_t *rsp; struct lpfc_dmabuf *bmp, *cmp; struct lpfc_nodelist *ndlp; unsigned long flags; int rc = 0; + u32 ulp_status, ulp_word4; - dd_data = cmdiocbq->context1; + dd_data = cmdiocbq->context_un.dd_data; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); @@ -1421,21 +1393,23 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, /* Close the timeout handler abort window */ spin_lock_irqsave(&phba->hbalock, flags); - cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING; spin_unlock_irqrestore(&phba->hbalock, flags); ndlp = dd_data->context_un.iocb.ndlp; - cmp = cmdiocbq->context2; - bmp = cmdiocbq->context3; - rsp = &rspiocbq->iocb; + cmp = cmdiocbq->cmd_dmabuf; + bmp = cmdiocbq->bpl_dmabuf; + + ulp_status = get_job_ulpstatus(phba, rspiocbq); + ulp_word4 = get_job_word4(phba, rspiocbq); /* Copy the completed job data or set the error status */ if (job) { bsg_reply = job->reply; - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + if (ulp_status) { + if (ulp_status == IOSTAT_LOCAL_REJECT) { + switch (ulp_word4 & IOERR_PARAM_MASK) { case IOERR_SEQUENCE_TIMEOUT: rc = -ETIMEDOUT; break; @@ -1476,7 +1450,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, * @phba: Pointer to HBA context object. * @job: Pointer to the job object. * @tag: tag index value into the ports context exchange array. - * @bmp: Pointer to a dma buffer descriptor. + * @cmp: Pointer to a cmp dma buffer descriptor. + * @bmp: Pointer to a bmp dma buffer descriptor. * @num_entry: Number of enties in the bde. 
**/ static int @@ -1484,13 +1459,22 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp, int num_entry) { - IOCB_t *icmd; struct lpfc_iocbq *ctiocb = NULL; int rc = 0; struct lpfc_nodelist *ndlp = NULL; struct bsg_job_data *dd_data; unsigned long flags; uint32_t creg_val; + u16 ulp_context, iotag; + + ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); + if (!ndlp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "2721 ndlp null for oxid %x SID %x\n", + phba->ct_ctx[tag].rxid, + phba->ct_ctx[tag].SID); + return IOCB_ERROR; + } /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); @@ -1508,82 +1492,53 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, goto no_ctiocb; } - icmd = &ctiocb->iocb; - icmd->un.xseq64.bdl.ulpIoTag32 = 0; - icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); - icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); - icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); - icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); - icmd->un.xseq64.w5.hcsw.Dfctl = 0; - icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL; - icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; - - /* Fill in rest of iocb */ - icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; - icmd->ulpBdeCount = 1; - icmd->ulpLe = 1; - icmd->ulpClass = CLASS3; if (phba->sli_rev == LPFC_SLI_REV4) { /* Do not issue unsol response if oxid not marked as valid */ if (phba->ct_ctx[tag].valid != UNSOL_VALID) { rc = IOCB_ERROR; goto issue_ct_rsp_exit; } - icmd->ulpContext = phba->ct_ctx[tag].rxid; - icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid; - ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); - if (!ndlp) { - lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, - "2721 ndlp null for oxid %x SID %x\n", - icmd->ulpContext, - phba->ct_ctx[tag].SID); - rc = IOCB_ERROR; - goto issue_ct_rsp_exit; - } - - /* Check if the ndlp is active */ - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - rc = IOCB_ERROR; - goto issue_ct_rsp_exit; - } - /* get a refernece count so the ndlp doesn't go away while - * we respond - */ - if (!lpfc_nlp_get(ndlp)) { - rc = IOCB_ERROR; - goto issue_ct_rsp_exit; - } - - icmd->un.ulpWord[3] = - phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, + phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], + phba->ct_ctx[tag].oxid, num_entry, + FC_RCTL_DD_SOL_CTL, 1, + CMD_XMIT_SEQUENCE64_WQE); /* The exchange is done, mark the entry as invalid */ phba->ct_ctx[tag].valid = UNSOL_INVALID; - } else - icmd->ulpContext = (ushort) tag; + iotag = get_wqe_reqtag(ctiocb); + } else { + lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry, + FC_RCTL_DD_SOL_CTL, 1, + CMD_XMIT_SEQUENCE64_CX); + ctiocb->num_bdes = num_entry; + iotag = ctiocb->iocb.ulpIoTag; + } - icmd->ulpTimeout = phba->fc_ratov * 2; + ulp_context = get_job_ulpcontext(phba, ctiocb); /* Xmit CT response on exchange <xid> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", - icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state); + "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", + ulp_context, iotag, tag, phba->link_state); - ctiocb->iocb_cmpl = NULL; - ctiocb->iocb_flag |= LPFC_IO_LIBDFC; + ctiocb->cmd_flag |= LPFC_IO_LIBDFC; ctiocb->vport = phba->pport; - ctiocb->context1 = dd_data; - ctiocb->context2 = cmp; - ctiocb->context3 = bmp; - 
ctiocb->context_un.ndlp = ndlp; - ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; + ctiocb->context_un.dd_data = dd_data; + ctiocb->cmd_dmabuf = cmp; + ctiocb->bpl_dmabuf = bmp; + ctiocb->ndlp = ndlp; + ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = ctiocb; - dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp); + if (!dd_data->context_un.iocb.ndlp) { + rc = -IOCB_ERROR; + goto issue_ct_rsp_exit; + } dd_data->context_un.iocb.rmp = NULL; job->dd_data = dd_data; @@ -1598,13 +1553,12 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, } rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); - if (rc == IOCB_SUCCESS) { spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O had not been completed/released */ - if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) { + if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) { /* open up abort window to timeout handler */ - ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING; } spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ @@ -1612,6 +1566,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, /* iocb failed so cleanup */ job->dd_data = NULL; + lpfc_nlp_put(ndlp); issue_ct_rsp_exit: lpfc_sli_release_iocbq(phba, ctiocb); @@ -1968,14 +1923,17 @@ link_diag_state_set_out: } /** - * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic + * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic * @phba: Pointer to HBA context object. + * @mode: loopback mode to set + * @link_no: link number for loopback mode to set * * This function is responsible for issuing a sli4 mailbox command for setting - * up internal loopback diagnostic. + * up loopback diagnostic for a link. 
*/ static int -lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) +lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode, + uint32_t link_no) { LPFC_MBOXQ_t *pmboxq; uint32_t req_len, alloc_len; @@ -1996,11 +1954,19 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) } link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback; bf_set(lpfc_mbx_set_diag_state_link_num, - &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no); - bf_set(lpfc_mbx_set_diag_state_link_type, - &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); + &link_diag_loopback->u.req, link_no); + + if (phba->sli4_hba.conf_trunk & (1 << link_no)) { + bf_set(lpfc_mbx_set_diag_state_link_type, + &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED); + } else { + bf_set(lpfc_mbx_set_diag_state_link_type, + &link_diag_loopback->u.req, + phba->sli4_hba.lnk_info.lnk_tp); + } + bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, - LPFC_DIAG_LOOPBACK_TYPE_INTERNAL); + mode); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { @@ -2025,9 +1991,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba) static int lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) { - int rc; - - if (phba->pport->fc_flag & FC_VFI_REGISTERED) { + if (test_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3136 Port still had vfi registered: " "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n", @@ -2036,8 +2000,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba) phba->vpi_ids[phba->pport->vpi]); return -EINVAL; } - rc = lpfc_issue_reg_vfi(phba->pport); - return rc; + return lpfc_issue_reg_vfi(phba->pport); } /** @@ -2054,7 +2017,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; struct diag_mode_set *loopback_mode; - uint32_t link_flags, timeout; + uint32_t link_flags, timeout, link_no; int i, rc = 0; /* no data to return just the return code */ @@ -2069,12 +2032,39 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) (int)(sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set))); rc = -EINVAL; - goto job_error; + goto job_done; + } + + loopback_mode = (struct diag_mode_set *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + link_flags = loopback_mode->type; + timeout = loopback_mode->timeout * 100; + + if (loopback_mode->physical_link == -1) + link_no = phba->sli4_hba.lnk_info.lnk_no; + else + link_no = loopback_mode->physical_link; + + if (link_flags == DISABLE_LOOP_BACK) { + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_DISABLE, + link_no); + if (!rc) { + /* Unset the need disable bit */ + phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4); + } + goto job_done; + } else { + /* Check if we need to disable the loopback state */ + if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) { + rc = -EPERM; + goto job_done; + } } rc = lpfc_bsg_diag_mode_enter(phba); if (rc) - goto job_error; + goto job_done; /* indicate we are in loobpack diagnostic mode */ spin_lock_irq(&phba->hbalock); @@ -2084,15 +2074,11 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) /* reset port to start frome scratch */ rc = lpfc_selective_reset(phba); if (rc) - goto job_error; + goto job_done; /* bring the link to diagnostic mode */ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3129 Bring link to 
diagnostic state.\n"); - loopback_mode = (struct diag_mode_set *) - bsg_request->rqst_data.h_vendor.vendor_cmd; - link_flags = loopback_mode->type; - timeout = loopback_mode->timeout * 100; rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); if (rc) { @@ -2120,13 +2106,54 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job) lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3132 Set up loopback mode:x%x\n", link_flags); - if (link_flags == INTERNAL_LOOP_BACK) - rc = lpfc_sli4_bsg_set_internal_loopback(phba); - else if (link_flags == EXTERNAL_LOOP_BACK) - rc = lpfc_hba_init_link_fc_topology(phba, - FLAGS_TOPOLOGY_MODE_PT_PT, - MBX_NOWAIT); - else { + switch (link_flags) { + case INTERNAL_LOOP_BACK: + if (phba->sli4_hba.conf_trunk & (1 << link_no)) { + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, + link_no); + } else { + /* Trunk is configured, but link is not in this trunk */ + if (phba->sli4_hba.conf_trunk) { + rc = -ELNRNG; + goto loopback_mode_exit; + } + + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_INTERNAL, + link_no); + } + + if (!rc) { + /* Set the need disable bit */ + phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; + } + + break; + case EXTERNAL_LOOP_BACK: + if (phba->sli4_hba.conf_trunk & (1 << link_no)) { + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED, + link_no); + } else { + /* Trunk is configured, but link is not in this trunk */ + if (phba->sli4_hba.conf_trunk) { + rc = -ELNRNG; + goto loopback_mode_exit; + } + + rc = lpfc_sli4_bsg_set_loopback_mode(phba, + LPFC_DIAG_LOOPBACK_TYPE_SERDES, + link_no); + } + + if (!rc) { + /* Set the need disable bit */ + phba->sli4_hba.conf_trunk |= (1 << link_no) << 4; + } + + break; + default: rc = -EINVAL; lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3141 Loopback mode:x%x not supported\n", @@ -2185,7 +2212,7 @@ loopback_mode_exit: } lpfc_bsg_diag_mode_exit(phba); -job_error: +job_done: /* make error code available to userspace */ bsg_reply->result = rc; /* complete the job back to userspace if no error */ @@ -2332,33 +2359,27 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) union lpfc_sli4_cfg_shdr *shdr; uint32_t shdr_status, shdr_add_status; struct diag_status *diag_status_reply; - int mbxstatus, rc = 0; + int mbxstatus, rc = -ENODEV, rc1 = 0; shost = fc_bsg_to_shost(job); - if (!shost) { - rc = -ENODEV; + if (!shost) goto job_error; - } + vport = shost_priv(shost); - if (!vport) { - rc = -ENODEV; + if (!vport) goto job_error; - } + phba = vport->phba; - if (!phba) { - rc = -ENODEV; + if (!phba) goto job_error; - } - if (phba->sli_rev < LPFC_SLI_REV4) { - rc = -ENODEV; + + if (phba->sli_rev < LPFC_SLI_REV4) goto job_error; - } + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < - LPFC_SLI_INTF_IF_TYPE_2) { - rc = -ENODEV; + LPFC_SLI_INTF_IF_TYPE_2) goto job_error; - } if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct sli4_link_diag)) { @@ -2385,16 +2406,20 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) goto job_error; pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmboxq) + if (!pmboxq) { + rc = -ENOMEM; goto link_diag_test_exit; + } req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - sizeof(struct lpfc_sli4_cfg_mhdr)); alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, req_len, LPFC_SLI4_MBX_EMBED); - if (alloc_len != req_len) + if (alloc_len != req_len) { + rc = -ENOMEM; goto link_diag_test_exit; + 
} run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, @@ -2426,13 +2451,12 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) diag_status_reply = (struct diag_status *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3012 Received Run link diag test reply " "below minimum size (%d): reply_len:%d\n", - (int)(sizeof(struct fc_bsg_request) + - sizeof(struct diag_status)), + (int)(sizeof(*bsg_reply) + + sizeof(*diag_status_reply)), job->reply_len); rc = -EINVAL; goto job_error; @@ -2443,7 +2467,7 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) diag_status_reply->shdr_add_status = shdr_add_status; link_diag_test_exit: - rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); + rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0); if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); @@ -2452,6 +2476,8 @@ link_diag_test_exit: job_error: /* make error code available to userspace */ + if (rc1 && !rc) + rc = rc1; bsg_reply->result = rc; /* complete the job back to userspace if no error */ if (rc == 0) @@ -2501,7 +2527,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) return -ENOMEM; } - dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf; + dmabuff = mbox->ctx_buf; mbox->ctx_buf = NULL; mbox->ctx_ndlp = NULL; status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); @@ -2570,7 +2596,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) * * This function obtains the transmit and receive ids required to send * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp - * flags are used to the unsolicted response handler is able to process + * flags are used to the unsolicited response handler is able to process * the ct command sent on the same port. 
**/ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, @@ -2578,7 +2604,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, { struct lpfc_bsg_event *evt; struct lpfc_iocbq *cmdiocbq, *rspiocbq; - IOCB_t *cmd, *rsp; struct lpfc_dmabuf *dmabuf; struct ulp_bde64 *bpl = NULL; struct lpfc_sli_ct_request *ctreq = NULL; @@ -2586,6 +2611,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, int time_left; int iocb_stat = IOCB_SUCCESS; unsigned long flags; + u32 status; *txxri = 0; *rxxri = 0; @@ -2629,9 +2655,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, goto err_get_xri_exit; } - cmd = &cmdiocbq->iocb; - rsp = &rspiocbq->iocb; - memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; @@ -2641,43 +2664,30 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; ctreq->CommandResponse.bits.Size = 0; - - cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); - cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); - cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); - - cmd->un.xseq64.w5.hcsw.Fctl = LA; - cmd->un.xseq64.w5.hcsw.Dfctl = 0; - cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; - cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; - - cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; - cmd->ulpBdeCount = 1; - cmd->ulpLe = 1; - cmd->ulpClass = CLASS3; - cmd->ulpContext = rpi; - - cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + cmdiocbq->bpl_dmabuf = dmabuf; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; - cmdiocbq->iocb_cmpl = NULL; + cmdiocbq->cmd_cmpl = NULL; + + lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1, + FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR); iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, - rspiocbq, - (phba->fc_ratov * 2) - + LPFC_DRVR_TIMEOUT); - if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { + rspiocbq, (phba->fc_ratov * 2) + + LPFC_DRVR_TIMEOUT); + + status = get_job_ulpstatus(phba, rspiocbq); + if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) { ret_val = -EIO; goto err_get_xri_exit; } - *txxri = rsp->ulpContext; + *txxri = get_job_ulpcontext(phba, rspiocbq); evt->waiting = 1; evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - msecs_to_jiffies(1000 * - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT)); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -2871,16 +2881,16 @@ out: } /** - * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd + * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd * @phba: Pointer to HBA context object * @rxxri: Receive exchange id * @len: Number of data bytes * * This function allocates and posts a data buffer of sufficient size to receive - * an unsolicted CT command. + * an unsolicited CT command. 
**/ -static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, - size_t len) +static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, + size_t len) { struct lpfc_sli_ring *pring; struct lpfc_iocbq *cmdiocbq; @@ -2917,7 +2927,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, /* Queue buffers for the receive exchange */ num_bde = (uint32_t)rxbuffer->flag; dmp = &rxbuffer->dma; - cmd = &cmdiocbq->iocb; i = 0; @@ -2947,7 +2956,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); cmd->un.cont64[i].tus.f.bdeSize = ((struct lpfc_dmabufext *)mp[i])->size; - cmd->ulpBdeCount = ++i; + cmd->ulpBdeCount = ++i; if ((--num_bde > 0) && (i < 2)) continue; @@ -2985,7 +2994,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, ret_val = -EIO; goto err_post_rxbufs_exit; } - cmd = &cmdiocbq->iocb; i = 0; } @@ -3037,7 +3045,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) size_t segment_len = 0, segment_offset = 0, current_offset = 0; uint16_t rpi = 0; struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL; - IOCB_t *cmd, *rsp = NULL; + union lpfc_wqe128 *cmdwqe, *rspwqe; struct lpfc_sli_ct_request *ctreq; struct lpfc_dmabuf *txbmp; struct ulp_bde64 *txbpl = NULL; @@ -3130,7 +3138,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) goto loopback_test_exit; } - rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); + rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size); if (rc) { lpfcdiag_loop_self_unreg(phba, rpi); goto loopback_test_exit; @@ -3173,9 +3181,12 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) goto err_loopback_test_exit; } - cmd = &cmdiocbq->iocb; - if (phba->sli_rev < LPFC_SLI_REV4) - rsp = &rspiocbq->iocb; + cmdwqe = &cmdiocbq->wqe; + memset(cmdwqe, 0, sizeof(*cmdwqe)); + if (phba->sli_rev < LPFC_SLI_REV4) { + rspwqe = &rspiocbq->wqe; + memset(rspwqe, 0, sizeof(*rspwqe)); + } INIT_LIST_HEAD(&head); list_add_tail(&head, &txbuffer->dma.list); @@ -3188,8 +3199,8 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) ctreq->RevisionId.bits.InId = 0; ctreq->FsType = SLI_CT_ELX_LOOPBACK; ctreq->FsSubType = 0; - ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; - ctreq->CommandResponse.bits.Size = size; + ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(ELX_LOOPBACK_DATA); + ctreq->CommandResponse.bits.Size = cpu_to_be16(size); segment_offset = ELX_LOOPBACK_HEADER_SZ; } else segment_offset = 0; @@ -3207,41 +3218,35 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) /* Build the XMIT_SEQUENCE iocb */ num_bde = (uint32_t)txbuffer->flag; - cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); - cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); - cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64)); - - cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); - cmd->un.xseq64.w5.hcsw.Dfctl = 0; - cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; - cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; + cmdiocbq->num_bdes = num_bde; + cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC; + cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK; + if (phba->cfg_vmid_app_header) + cmdiocbq->cmd_flag |= LPFC_IO_VMID; - cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; - cmd->ulpBdeCount = 1; - cmd->ulpLe = 1; - cmd->ulpClass = CLASS3; + cmdiocbq->vport = phba->pport; + cmdiocbq->cmd_cmpl = NULL; + cmdiocbq->bpl_dmabuf = txbmp; if (phba->sli_rev < LPFC_SLI_REV4) { - cmd->ulpContext = txxri; + 
lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri, + num_bde, FC_RCTL_DD_UNSOL_CTL, 1, + CMD_XMIT_SEQUENCE64_CX); + } else { - cmd->un.xseq64.bdl.ulpIoTag32 = 0; - cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi]; - cmdiocbq->context3 = txbmp; + lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, + phba->sli4_hba.rpi_ids[rpi], 0xffff, + full_size, FC_RCTL_DD_UNSOL_CTL, 1, + CMD_XMIT_SEQUENCE64_WQE); cmdiocbq->sli4_xritag = NO_XRI; - cmd->unsli3.rcvsli3.ox_id = 0xffff; } - cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; - cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK; - cmdiocbq->vport = phba->pport; - cmdiocbq->iocb_cmpl = NULL; + iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - - if ((iocb_stat != IOCB_SUCCESS) || - ((phba->sli_rev < LPFC_SLI_REV4) && - (rsp->ulpStatus != IOSTAT_SUCCESS))) { + if (iocb_stat != IOCB_SUCCESS || + (phba->sli_rev < LPFC_SLI_REV4 && + (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3126 Failed loopback test issue iocb: " "iocb_stat:x%x\n", iocb_stat); @@ -3252,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - msecs_to_jiffies(1000 * - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT)); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? -EINTR : -ETIMEDOUT; @@ -3348,8 +3352,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job) event_reply = (struct get_mgmt_rev_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2741 Received GET_DFC_REV reply below " "minimum size\n"); @@ -3375,7 +3378,7 @@ job_error: * This is completion handler function for mailbox commands issued from * lpfc_bsg_issue_mbox function. This function is called by the * mailbox event handler function with no lock held. This function - * will wake up thread waiting on the wait queue pointed by context1 + * will wake up thread waiting on the wait queue pointed by dd_data * of the mailbox. 
**/ static void @@ -3388,7 +3391,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) unsigned long flags; uint8_t *pmb, *pmb_buf; - dd_data = pmboxq->ctx_ndlp; + dd_data = pmboxq->ctx_u.dd_data; /* * The outgoing buffer is readily referred from the dma buffer, @@ -3460,13 +3463,14 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, case MBX_RUN_DIAGS: case MBX_RESTART: case MBX_SET_MASK: - if (!(vport->fc_flag & FC_OFFLINE_MODE)) { + if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2743 Command 0x%x is illegal in on-line " "state\n", mb->mbxCommand); return -EPERM; } + break; case MBX_WRITE_NV: case MBX_WRITE_VPARMS: case MBX_LOAD_SM: @@ -3501,15 +3505,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, "1226 mbox: set_variable 0x%x, 0x%x\n", mb->un.varWords[0], mb->un.varWords[1]); - if ((mb->un.varWords[0] == SETVAR_MLOMNT) - && (mb->un.varWords[1] == 1)) { - phba->wait_4_mlo_maint_flg = 1; - } else if (mb->un.varWords[0] == SETVAR_MLORST) { - spin_lock_irq(&phba->hbalock); - phba->link_flag &= ~LS_LOOPBACK_MODE; - spin_unlock_irq(&phba->hbalock); - phba->fc_topology = LPFC_TOPOLOGY_PT_PT; - } break; case MBX_READ_SPARM64: case MBX_REG_LOGIN: @@ -3527,7 +3522,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, } /** - * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session + * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session * @phba: Pointer to HBA context object. * * This is routine clean up and reset BSG handling of multi-buffer mbox @@ -3552,6 +3547,103 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba) } /** + * lpfc_rd_obj_emb0_handle_job - Handles completion for non-embedded + * READ_OBJECT_V0 mailbox commands + * @phba: pointer to lpfc_hba data struct + * @pmb_buf: pointer to mailbox buffer + * @sli_cfg_mbx: pointer to SLI_CONFIG mailbox memory region + * @job: pointer to bsg_job struct + * @bsg_reply: point to bsg_reply struct + * + * Given a non-embedded READ_OBJECT_V0's HBD_CNT, this routine copies + * a READ_OBJECT_V0 mailbox command's read data payload into a bsg_job + * structure for passing back to application layer. + * + * Return codes + * 0 - successful + * -EINVAL - invalid HBD_CNT + * -ENODEV - pointer to bsg_job struct is NULL + **/ +static int +lpfc_rd_obj_emb0_handle_job(struct lpfc_hba *phba, u8 *pmb_buf, + struct lpfc_sli_config_mbox *sli_cfg_mbx, + struct bsg_job *job, + struct fc_bsg_reply *bsg_reply) +{ + struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; + struct lpfc_sli_config_emb0_subsys *emb0_subsys; + u32 hbd_cnt; + u32 dma_buf_len; + u8 i = 0; + size_t extra_bytes; + off_t skip = 0; + + if (!job) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2496 NULL job\n"); + return -ENODEV; + } + + if (!bsg_reply) { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2498 NULL bsg_reply\n"); + return -ENODEV; + } + + emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys; + + hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt, + emb0_subsys); + + /* Calculate where the read object's read data payload is located based + * on HBD count scheme. 
+ */ + if (hbd_cnt >= rd_obj_scheme[0].min_hbd_cnt && + hbd_cnt <= rd_obj_scheme[0].max_hbd_cnt) { + skip = rd_obj_scheme[0].payload_word_offset * 4; + } else if (hbd_cnt >= rd_obj_scheme[1].min_hbd_cnt && + hbd_cnt <= rd_obj_scheme[1].max_hbd_cnt) { + skip = rd_obj_scheme[1].payload_word_offset * 4; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2497 bad hbd_count 0x%08x\n", + hbd_cnt); + return -EINVAL; + } + + /* Copy SLI_CONFIG command and READ_OBJECT response first */ + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + pmb_buf, skip); + + /* Copy data from hbds */ + list_for_each_entry_safe(curr_dmabuf, next_dmabuf, + &phba->mbox_ext_buf_ctx.ext_dmabuf_list, + list) { + dma_buf_len = emb0_subsys->hbd[i].buf_len; + + /* Use sg_copy_buffer to specify a skip offset */ + extra_bytes = sg_copy_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + curr_dmabuf->virt, + dma_buf_len, skip, false); + + bsg_reply->reply_payload_rcv_len += extra_bytes; + + skip += extra_bytes; + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2499 copied hbd[%d] " + "0x%zx bytes\n", + i, extra_bytes); + i++; + } + + return 0; +} + +/** * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl * @phba: Pointer to HBA context object. * @pmboxq: Pointer to mailbox command. @@ -3564,16 +3656,16 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; struct bsg_job *job; - struct fc_bsg_reply *bsg_reply; + struct fc_bsg_reply *bsg_reply = NULL; uint8_t *pmb, *pmb_buf; unsigned long flags; - uint32_t size; + u32 size, opcode; int rc = 0; struct lpfc_dmabuf *dmabuf; struct lpfc_sli_config_mbox *sli_cfg_mbx; uint8_t *pmbx; - dd_data = pmboxq->ctx_buf; + dd_data = pmboxq->ctx_u.dd_data; /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); @@ -3604,6 +3696,24 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], &pmbx[sizeof(MAILBOX_t)], sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); + + /* Special handling for non-embedded READ_OBJECT */ + opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, + &sli_cfg_mbx->un.sli_config_emb0_subsys); + switch (opcode) { + case COMN_OPCODE_READ_OBJECT: + if (job) { + rc = lpfc_rd_obj_emb0_handle_job(phba, pmb_buf, + sli_cfg_mbx, + job, + bsg_reply); + bsg_reply->result = rc; + goto done; + } + break; + default: + break; + } } /* Complete the job if the job is still active */ @@ -3617,12 +3727,14 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* result for successful */ bsg_reply->result = 0; +done: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2937 SLI_CONFIG ext-buffer mailbox command " "(x%x/x%x) complete bsg job done, bsize:%d\n", phba->mbox_ext_buf_ctx.nembType, - phba->mbox_ext_buf_ctx.mboxType, size); + phba->mbox_ext_buf_ctx.mboxType, + job->reply_payload.payload_len); lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, @@ -3816,14 +3928,14 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, } /** - * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read + * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read * @phba: Pointer to HBA context object. - * @mb: Pointer to a BSG mailbox object. + * @job: Pointer to the job object. 
* @nemb_tp: Enumerate of non-embedded mailbox command type. - * @dmabuff: Pointer to a DMA buffer descriptor. + * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with - * non-embedded external bufffers. + * non-embedded external buffers. **/ static int lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, @@ -3832,14 +3944,16 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, { struct fc_bsg_request *bsg_request = job->request; struct lpfc_sli_config_mbox *sli_cfg_mbx; + struct lpfc_sli_config_emb0_subsys *emb0_subsys; + struct list_head *ext_dmabuf_list; struct dfc_mbox_req *mbox_req; struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; - uint32_t ext_buf_cnt, ext_buf_index; + u32 ext_buf_cnt, ext_buf_index, hbd_cnt; struct lpfc_dmabuf *ext_dmabuf = NULL; struct bsg_job_data *dd_data = NULL; LPFC_MBOXQ_t *pmboxq = NULL; MAILBOX_t *pmb; - uint8_t *pmbx; + u8 *pmbx, opcode; int rc, i; mbox_req = @@ -3849,8 +3963,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; if (nemb_tp == nemb_mse) { + emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys; ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, - &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); + &emb0_subsys->sli_config_hdr); if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2945 Handled SLI_CONFIG(mse) rd, " @@ -3860,6 +3975,57 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, rc = -ERANGE; goto job_error; } + + /* Special handling for non-embedded READ_OBJECT */ + opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, emb0_subsys); + switch (opcode) { + case COMN_OPCODE_READ_OBJECT: + hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt, + emb0_subsys); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2449 SLI_CONFIG(mse) rd non-embedded " + "hbd count = %d\n", + hbd_cnt); + + ext_dmabuf_list = + &phba->mbox_ext_buf_ctx.ext_dmabuf_list; + + /* Allocate hbds */ + for (i = 0; i < hbd_cnt; i++) { + ext_dmabuf = lpfc_bsg_dma_page_alloc(phba); + if (!ext_dmabuf) { + rc = -ENOMEM; + goto job_error; + } + list_add_tail(&ext_dmabuf->list, + ext_dmabuf_list); + } + + /* Fill out the physical memory addresses for the + * hbds + */ + i = 0; + list_for_each_entry_safe(curr_dmabuf, next_dmabuf, + ext_dmabuf_list, list) { + emb0_subsys->hbd[i].pa_hi = + putPaddrHigh(curr_dmabuf->phys); + emb0_subsys->hbd[i].pa_lo = + putPaddrLow(curr_dmabuf->phys); + + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, + "2495 SLI_CONFIG(hbd)[%d], " + "bufLen:%d, addrHi:x%x, " + "addrLo:x%x\n", i, + emb0_subsys->hbd[i].buf_len, + emb0_subsys->hbd[i].pa_hi, + emb0_subsys->hbd[i].pa_lo); + i++; + } + break; + default: + break; + } + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2941 Handled SLI_CONFIG(mse) rd, " "ext_buf_cnt:%d\n", ext_buf_cnt); @@ -3960,7 +4126,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; /* context fields to callback function */ - pmboxq->ctx_buf = dd_data; + pmboxq->ctx_u.dd_data = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; @@ -4007,11 +4173,12 @@ job_error: /** * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write * @phba: Pointer to HBA context object. - * @mb: Pointer to a BSG mailbox object. 
- * @dmabuff: Pointer to a DMA buffer descriptor. + * @job: Pointer to the job object. + * @nemb_tp: Enumerate of non-embedded mailbox command type. + * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with - * non-embedded external bufffers. + * non-embedded external buffers. **/ static int lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, @@ -4131,7 +4298,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; /* context fields to callback function */ - pmboxq->ctx_buf = dd_data; + pmboxq->ctx_u.dd_data = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; @@ -4155,7 +4322,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job, goto job_error; } - /* wait for additoinal external buffers */ + /* wait for additional external buffers */ bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, @@ -4173,12 +4340,12 @@ job_error: /** * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer * @phba: Pointer to HBA context object. - * @mb: Pointer to a BSG mailbox object. - * @dmabuff: Pointer to a DMA buffer descriptor. + * @job: Pointer to the job object. + * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded - * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B - * with embedded sussystem 0x1 and opcodes with external HBDs. + * external buffers, including both 0x9B with non-embedded MSEs and 0x9B + * with embedded subsystem 0x1 and opcodes with external HBDs. **/ static int lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, @@ -4234,6 +4401,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: case COMN_OPCODE_GET_CNTL_ATTRIBUTES: case COMN_OPCODE_GET_PROFILE_CONFIG: + case COMN_OPCODE_SET_FEATURES: + case COMN_OPCODE_READ_OBJECT: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "3106 Handled SLI_CONFIG " "subsys_comn, opcode:x%x\n", @@ -4305,7 +4474,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job, } /** - * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers + * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers * @phba: Pointer to HBA context object. * * This routine is for requesting to abort a pass-through mailbox command with @@ -4324,7 +4493,7 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba) /** * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer * @phba: Pointer to HBA context object. - * @dmabuf: Pointer to a DMA buffer descriptor. + * @job: Pointer to the job object. * * This routine extracts the next mailbox read external buffer back to * user space through BSG. @@ -4394,6 +4563,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job) /** * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer * @phba: Pointer to HBA context object. + * @job: Pointer to the job object. * @dmabuf: Pointer to a DMA buffer descriptor. 
* * This routine sets up the next mailbox read external buffer obtained @@ -4417,12 +4587,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - rc = -ENOMEM; - goto job_error; - } - pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, @@ -4459,6 +4623,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); + + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { @@ -4476,7 +4647,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl; /* context fields to callback function */ - pmboxq->ctx_buf = dd_data; + pmboxq->ctx_u.dd_data = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; @@ -4500,13 +4671,15 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, goto job_error; } - /* wait for additoinal external buffers */ + /* wait for additional external buffers */ bsg_reply->result = 0; bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return SLI_CONFIG_HANDLED; job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); @@ -4516,8 +4689,8 @@ job_error: /** * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd * @phba: Pointer to HBA context object. - * @mb: Pointer to a BSG mailbox object. - * @dmabuff: Pointer to a DMA buffer descriptor. + * @job: Pointer to the job object. + * @dmabuf: Pointer to a DMA buffer descriptor. * * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox * command with multiple non-embedded external buffers. @@ -4561,10 +4734,10 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job, /** * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer * @phba: Pointer to HBA context object. - * @mb: Pointer to a BSG mailbox object. - * @dmabuff: Pointer to a DMA buffer descriptor. + * @job: Pointer to the job object. + * @dmabuf: Pointer to a DMA buffer descriptor. * - * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG + * This routine checks and handles non-embedded multi-buffer SLI_CONFIG * (0x9B) mailbox commands and external buffers. **/ static int @@ -4635,14 +4808,14 @@ sli_cfg_ext_error: /** * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app * @phba: Pointer to HBA context object. - * @mb: Pointer to a mailbox object. + * @job: Pointer to the job object. * @vport: Pointer to a vport object. * * Allocate a tracking object, mailbox command memory, get a mailbox * from the mailbox pool, copy the caller mailbox command. * * If offline and the sli is active we need to poll for the command (port is - * being reset) and com-plete the job, otherwise issue the mailbox command and + * being reset) and complete the job, otherwise issue the mailbox command and * let our completion handler finish the command. 
**/ static int @@ -4672,8 +4845,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, bsg_reply->reply_payload_rcv_len = 0; /* sanity check to protect driver */ - if (job->reply_payload.payload_len > BSG_MBOX_SIZE || - job->request_payload.payload_len > BSG_MBOX_SIZE) { + if (job->request_payload.payload_len > BSG_MBOX_SIZE) { rc = -ERANGE; goto job_done; } @@ -4682,7 +4854,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, * Don't allow mailbox commands to be sent when blocked or when in * the middle of discovery */ - if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { rc = -EAGAIN; goto job_done; } @@ -4744,6 +4916,19 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, pmb->mbxOwner = OWN_HOST; pmboxq->vport = vport; + /* non-embedded SLI_CONFIG requests already parsed, check others */ + if (unlikely(job->reply_payload.payload_len > BSG_MBOX_SIZE)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2729 Cmd x%x (x%x/x%x) request has " + "out-of-range reply payload length x%x\n", + pmb->mbxCommand, + lpfc_sli_config_mbox_subsys_get(phba, pmboxq), + lpfc_sli_config_mbox_opcode_get(phba, pmboxq), + job->reply_payload.payload_len); + rc = -ERANGE; + goto job_done; + } + /* If HBA encountered an error attention, allow only DUMP * or RESTART mailbox commands until the HBA is restarted. */ @@ -4761,7 +4946,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, if (mbox_req->inExtWLen || mbox_req->outExtWLen) { from = pmbx; ext = from + sizeof(MAILBOX_t); - pmboxq->ctx_buf = ext; + pmboxq->ext_buf = ext; pmboxq->in_ext_byte_len = mbox_req->inExtWLen * sizeof(uint32_t); pmboxq->out_ext_byte_len = @@ -4889,7 +5074,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; /* setup context field to pass wait_queue pointer to wake function */ - pmboxq->ctx_ndlp = dd_data; + pmboxq->ctx_u.dd_data = dd_data; dd_data->type = TYPE_MBOX; dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; @@ -4900,7 +5085,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job, dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; job->dd_data = dd_data; - if ((vport->fc_flag & FC_OFFLINE_MODE) || + if (test_bit(FC_OFFLINE_MODE, &vport->fc_flag) || (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); if (rc != MBX_SUCCESS) { @@ -4985,283 +5170,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job) return rc; } -/** - * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler - * @phba: Pointer to HBA context object. - * @cmdiocbq: Pointer to command iocb. - * @rspiocbq: Pointer to response iocb. - * - * This function is the completion handler for iocbs issued using - * lpfc_menlo_cmd function. This function is called by the - * ring event handler function without any lock held. This function - * can be called from both worker thread context and interrupt - * context. This function also can be called from another thread which - * cleans up the SLI layer objects. - * This function copies the contents of the response iocb to the - * response iocb memory object provided by the caller of - * lpfc_sli_issue_iocb_wait and then wakes up the thread which - * sleeps for the iocb completion. 
- **/ -static void -lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, - struct lpfc_iocbq *cmdiocbq, - struct lpfc_iocbq *rspiocbq) -{ - struct bsg_job_data *dd_data; - struct bsg_job *job; - struct fc_bsg_reply *bsg_reply; - IOCB_t *rsp; - struct lpfc_dmabuf *bmp, *cmp, *rmp; - struct lpfc_bsg_menlo *menlo; - unsigned long flags; - struct menlo_response *menlo_resp; - unsigned int rsp_size; - int rc = 0; - - dd_data = cmdiocbq->context1; - cmp = cmdiocbq->context2; - bmp = cmdiocbq->context3; - menlo = &dd_data->context_un.menlo; - rmp = menlo->rmp; - rsp = &rspiocbq->iocb; - - /* Determine if job has been aborted */ - spin_lock_irqsave(&phba->ct_ev_lock, flags); - job = dd_data->set_job; - if (job) { - bsg_reply = job->reply; - /* Prevent timeout handling from trying to abort job */ - job->dd_data = NULL; - } - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - - /* Copy the job data or set the failing status for the job */ - - if (job) { - /* always return the xri, this would be used in the case - * of a menlo download to allow the data to be sent as a - * continuation of the exchange. - */ - - menlo_resp = (struct menlo_response *) - bsg_reply->reply_data.vendor_reply.vendor_rsp; - menlo_resp->xri = rsp->ulpContext; - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: - rc = -EACCES; - break; - } - } else { - rc = -EACCES; - } - } else { - rsp_size = rsp->un.genreq64.bdl.bdeSize; - bsg_reply->reply_payload_rcv_len = - lpfc_bsg_copy_data(rmp, &job->reply_payload, - rsp_size, 0); - } - - } - - lpfc_sli_release_iocbq(phba, cmdiocbq); - lpfc_free_bsg_buffers(phba, cmp); - lpfc_free_bsg_buffers(phba, rmp); - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - kfree(bmp); - kfree(dd_data); - - /* Complete the job if active */ - - if (job) { - bsg_reply->result = rc; - bsg_job_done(job, bsg_reply->result, - bsg_reply->reply_payload_rcv_len); - } - - return; -} - -/** - * lpfc_menlo_cmd - send an ioctl for menlo hardware - * @job: fc_bsg_job to handle - * - * This function issues a gen request 64 CR ioctl for all menlo cmd requests, - * all the command completions will return the xri for the command. - * For menlo data requests a gen request 64 CX is used to continue the exchange - * supplied in the menlo request header xri field. 
- **/ -static int -lpfc_menlo_cmd(struct bsg_job *job) -{ - struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); - struct fc_bsg_request *bsg_request = job->request; - struct fc_bsg_reply *bsg_reply = job->reply; - struct lpfc_hba *phba = vport->phba; - struct lpfc_iocbq *cmdiocbq; - IOCB_t *cmd; - int rc = 0; - struct menlo_command *menlo_cmd; - struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; - int request_nseg; - int reply_nseg; - struct bsg_job_data *dd_data; - struct ulp_bde64 *bpl = NULL; - - /* in case no data is returned return just the return code */ - bsg_reply->reply_payload_rcv_len = 0; - - if (job->request_len < - sizeof(struct fc_bsg_request) + - sizeof(struct menlo_command)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, - "2784 Received MENLO_CMD request below " - "minimum size\n"); - rc = -ERANGE; - goto no_dd_data; - } - - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, - "2785 Received MENLO_CMD reply below " - "minimum size\n"); - rc = -ERANGE; - goto no_dd_data; - } - - if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, - "2786 Adapter does not support menlo " - "commands\n"); - rc = -EPERM; - goto no_dd_data; - } - - menlo_cmd = (struct menlo_command *) - bsg_request->rqst_data.h_vendor.vendor_cmd; - - /* allocate our bsg tracking structure */ - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, - "2787 Failed allocation of dd_data\n"); - rc = -ENOMEM; - goto no_dd_data; - } - - bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!bmp) { - rc = -ENOMEM; - goto free_dd; - } - - bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); - if (!bmp->virt) { - rc = -ENOMEM; - goto free_bmp; - } - - INIT_LIST_HEAD(&bmp->list); - - bpl = (struct ulp_bde64 *)bmp->virt; - request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); - cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, - 1, bpl, &request_nseg); - if (!cmp) { - rc = -ENOMEM; - goto free_bmp; - } - lpfc_bsg_copy_data(cmp, &job->request_payload, - job->request_payload.payload_len, 1); - - bpl += request_nseg; - reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; - rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, - bpl, &reply_nseg); - if (!rmp) { - rc = -ENOMEM; - goto free_cmp; - } - - cmdiocbq = lpfc_sli_get_iocbq(phba); - if (!cmdiocbq) { - rc = -ENOMEM; - goto free_rmp; - } - - cmd = &cmdiocbq->iocb; - cmd->un.genreq64.bdl.ulpIoTag32 = 0; - cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); - cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); - cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - cmd->un.genreq64.bdl.bdeSize = - (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); - cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); - cmd->un.genreq64.w5.hcsw.Dfctl = 0; - cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; - cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ - cmd->ulpBdeCount = 1; - cmd->ulpClass = CLASS3; - cmd->ulpOwner = OWN_CHIP; - cmd->ulpLe = 1; /* Limited Edition */ - cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; - cmdiocbq->vport = phba->pport; - /* We want the firmware to timeout before we do */ - cmd->ulpTimeout = MENLO_TIMEOUT - 5; - cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; - cmdiocbq->context1 = dd_data; - cmdiocbq->context2 = cmp; - cmdiocbq->context3 = bmp; - if (menlo_cmd->cmd == 
LPFC_BSG_VENDOR_MENLO_CMD) { - cmd->ulpCommand = CMD_GEN_REQUEST64_CR; - cmd->ulpPU = MENLO_PU; /* 3 */ - cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ - cmd->ulpContext = MENLO_CONTEXT; /* 0 */ - } else { - cmd->ulpCommand = CMD_GEN_REQUEST64_CX; - cmd->ulpPU = 1; - cmd->un.ulpWord[4] = 0; - cmd->ulpContext = menlo_cmd->xri; - } - - dd_data->type = TYPE_MENLO; - dd_data->set_job = job; - dd_data->context_un.menlo.cmdiocbq = cmdiocbq; - dd_data->context_un.menlo.rmp = rmp; - job->dd_data = dd_data; - - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, - MENLO_TIMEOUT - 5); - if (rc == IOCB_SUCCESS) - return 0; /* done for now */ - - lpfc_sli_release_iocbq(phba, cmdiocbq); - -free_rmp: - lpfc_free_bsg_buffers(phba, rmp); -free_cmp: - lpfc_free_bsg_buffers(phba, cmp); -free_bmp: - if (bmp->virt) - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - kfree(bmp); -free_dd: - kfree(dd_data); -no_dd_data: - /* make error code available to userspace */ - bsg_reply->result = rc; - job->dd_data = NULL; - return rc; -} - static int lpfc_forced_link_speed(struct bsg_job *job) { @@ -5285,9 +5193,7 @@ lpfc_forced_link_speed(struct bsg_job *job) forced_reply = (struct forced_link_speed_support_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + - sizeof(struct forced_link_speed_support_reply)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "0049 Received FORCED_LINK_SPEED reply below " "minimum size\n"); @@ -5295,7 +5201,8 @@ lpfc_forced_link_speed(struct bsg_job *job) goto job_error; } - forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED) + forced_reply->supported = test_bit(HBA_FORCED_LINK_SPEED, + &phba->hba_flag) ? LPFC_FORCED_LINK_SPEED_SUPPORTED : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED; job_error: @@ -5319,9 +5226,9 @@ lpfc_check_fwlog_support(struct lpfc_hba *phba) ras_fwlog = &phba->ras_fwlog; - if (ras_fwlog->ras_hwsupport == false) + if (!ras_fwlog->ras_hwsupport) return -EACCES; - else if (ras_fwlog->ras_enabled == false) + else if (!ras_fwlog->ras_enabled) return -EPERM; else return 0; @@ -5363,10 +5270,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job) bsg_reply->reply_data.vendor_reply.vendor_rsp; /* Current logging state */ - if (ras_fwlog->ras_active == true) + spin_lock_irq(&phba->ras_fwlog_lock); + if (ras_fwlog->state == ACTIVE) ras_reply->state = LPFC_RASLOG_STATE_RUNNING; else ras_reply->state = LPFC_RASLOG_STATE_STOPPED; + spin_unlock_irq(&phba->ras_fwlog_lock); ras_reply->log_level = phba->ras_fwlog.fw_loglevel; ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; @@ -5376,7 +5285,9 @@ ras_job_error: bsg_reply->result = rc; /* complete the job back to userspace */ - bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5421,10 +5332,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) if (action == LPFC_RASACTION_STOP_LOGGING) { /* Check if already disabled */ - if (ras_fwlog->ras_active == false) { + spin_lock_irq(&phba->ras_fwlog_lock); + if (ras_fwlog->state != ACTIVE) { + spin_unlock_irq(&phba->ras_fwlog_lock); rc = -ESRCH; goto ras_job_error; } + spin_unlock_irq(&phba->ras_fwlog_lock); /* Disable logging */ lpfc_ras_stop_fwlog(phba); @@ -5435,8 +5349,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) * FW-logging with new log-level. Return status * "Logging already Running" to caller. 
**/ - if (ras_fwlog->ras_active) + spin_lock_irq(&phba->ras_fwlog_lock); + if (ras_fwlog->state != INACTIVE) action_status = -EINPROGRESS; + spin_unlock_irq(&phba->ras_fwlog_lock); /* Enable logging */ rc = lpfc_sli4_ras_fwlog_init(phba, log_level, @@ -5455,8 +5371,9 @@ ras_job_error: bsg_reply->result = rc; /* complete the job back to userspace */ - bsg_job_done(job, bsg_reply->result, - bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5516,7 +5433,9 @@ ras_job_error: bsg_reply->result = rc; /* complete the job back to userspace */ - bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5549,10 +5468,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job) goto ras_job_error; /* Logging to be stopped before reading */ - if (ras_fwlog->ras_active == true) { + spin_lock_irq(&phba->ras_fwlog_lock); + if (ras_fwlog->state == ACTIVE) { + spin_unlock_irq(&phba->ras_fwlog_lock); rc = -EINPROGRESS; goto ras_job_error; } + spin_unlock_irq(&phba->ras_fwlog_lock); if (job->request_len < sizeof(struct fc_bsg_request) + @@ -5598,7 +5520,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job) ras_job_error: bsg_reply->result = rc; - bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5624,8 +5548,7 @@ lpfc_get_trunk_info(struct bsg_job *job) event_reply = (struct lpfc_trunk_info *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2728 Received GET TRUNK _INFO reply below " "minimum size\n"); @@ -5666,15 +5589,102 @@ lpfc_get_trunk_info(struct bsg_job *job) event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000; event_reply->logical_speed = - phba->sli4_hba.link_state.logical_speed / 100; + phba->sli4_hba.link_state.logical_speed / 1000; job_error: bsg_reply->result = rc; - bsg_job_done(job, bsg_reply->result, - bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } +static int +lpfc_get_cgnbuf_info(struct bsg_job *job) +{ + struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job)); + struct lpfc_hba *phba = vport->phba; + struct fc_bsg_request *bsg_request = job->request; + struct fc_bsg_reply *bsg_reply = job->reply; + struct get_cgnbuf_info_req *cgnbuf_req; + struct lpfc_cgn_info *cp; + uint8_t *cgn_buff; + size_t size, cinfosz; + int rc = 0; + + if (job->request_len < sizeof(struct fc_bsg_request) + + sizeof(struct get_cgnbuf_info_req)) { + rc = -ENOMEM; + goto job_exit; + } + + if (!phba->sli4_hba.pc_sli4_params.cmf) { + rc = -ENOENT; + goto job_exit; + } + + if (!phba->cgn_i || !phba->cgn_i->virt) { + rc = -ENOENT; + goto job_exit; + } + + cp = phba->cgn_i->virt; + if (cp->cgn_info_version < LPFC_CGN_INFO_V3) { + rc = -EPERM; + goto job_exit; + } + + cgnbuf_req = (struct get_cgnbuf_info_req *) + bsg_request->rqst_data.h_vendor.vendor_cmd; + + /* For reset or size == 0 */ + bsg_reply->reply_payload_rcv_len = 0; + + if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) { + lpfc_init_congestion_stat(phba); + goto job_exit; + } + + /* We don't want to include the CRC at the end */ + cinfosz = 
sizeof(struct lpfc_cgn_info) - sizeof(uint32_t); + + size = cgnbuf_req->read_size; + if (!size) + goto job_exit; + + if (size < cinfosz) { + /* Just copy back what we can */ + cinfosz = size; + rc = -E2BIG; + } + + /* Allocate memory to read congestion info */ + cgn_buff = vmalloc(cinfosz); + if (!cgn_buff) { + rc = -ENOMEM; + goto job_exit; + } + + memcpy(cgn_buff, cp, cinfosz); + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + cgn_buff, cinfosz); + + vfree(cgn_buff); + +job_exit: + bsg_reply->result = rc; + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + else + lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, + "2724 GET CGNBUF error: %d\n", rc); + return rc; +} + /** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle @@ -5715,10 +5725,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job) case LPFC_BSG_VENDOR_MBOX: rc = lpfc_bsg_mbox_cmd(job); break; - case LPFC_BSG_VENDOR_MENLO_CMD: - case LPFC_BSG_VENDOR_MENLO_DATA: - rc = lpfc_menlo_cmd(job); - break; case LPFC_BSG_VENDOR_FORCED_LINK_SPEED: rc = lpfc_forced_link_speed(job); break; @@ -5737,6 +5743,9 @@ lpfc_bsg_hst_vendor(struct bsg_job *job) case LPFC_BSG_VENDOR_GET_TRUNK_INFO: rc = lpfc_get_trunk_info(job); break; + case LPFC_BSG_VENDOR_GET_CGNBUF_INFO: + rc = lpfc_get_cgnbuf_info(job); + break; default: rc = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; @@ -5836,7 +5845,7 @@ lpfc_bsg_timeout(struct bsg_job *job) spin_lock_irqsave(&phba->hbalock, flags); /* make sure the I/O abort window is still open */ - if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) { + if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) { spin_unlock_irqrestore(&phba->hbalock, flags); return -EAGAIN; } @@ -5848,7 +5857,7 @@ lpfc_bsg_timeout(struct bsg_job *job) } } if (list_empty(&completions)) - lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL); spin_unlock_irqrestore(&phba->hbalock, flags); if (!list_empty(&completions)) { lpfc_sli_cancel_iocbs(phba, &completions, @@ -5868,31 +5877,6 @@ lpfc_bsg_timeout(struct bsg_job *job) phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; - case TYPE_MENLO: - /* Check to see if IOCB was issued to the port or not. If not, - * remove it from the txq queue and call cancel iocbs. - * Otherwise, call abort iotag. - */ - cmdiocb = dd_data->context_un.menlo.cmdiocbq; - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - - spin_lock_irqsave(&phba->hbalock, flags); - list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, - list) { - if (check_iocb == cmdiocb) { - list_move_tail(&check_iocb->list, &completions); - break; - } - } - if (list_empty(&completions)) - lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irqrestore(&phba->hbalock, flags); - if (!list_empty(&completions)) { - lpfc_sli_cancel_iocbs(phba, &completions, - IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); - } - break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; |
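
For illustration only (not part of the patch): the lpfc_get_cgnbuf_info() addition above clamps the copy length to the congestion-info record minus its trailing CRC word, copies whatever fits when the caller requested less than that, and flags the truncation with -E2BIG while still returning the partial data. The standalone C sketch below shows just that clamp-and-flag shape; the struct layout, field sizes, and function names are invented for the example and are not driver code (the driver itself uses vmalloc() and sg_copy_from_buffer() to move the data to the BSG reply).

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for a congestion-info record whose last
 * 32-bit word is a CRC that the reader does not want to copy out. */
struct cgn_info_example {
	uint8_t  payload[60];
	uint32_t crc;
};

/*
 * Copy up to 'read_size' bytes of 'src' (excluding the CRC) into 'dst'.
 * Reports the number of bytes copied through 'copied' and returns -E2BIG
 * when the caller asked for less than the full record, mirroring the
 * partial-copy behaviour of the BSG handler above.
 */
static int copy_cgn_info(const struct cgn_info_example *src,
			 void *dst, size_t read_size, size_t *copied)
{
	/* Do not include the trailing CRC word in what the caller sees. */
	size_t cinfosz = sizeof(*src) - sizeof(uint32_t);
	int rc = 0;

	*copied = 0;
	if (read_size == 0)
		return 0;

	if (read_size < cinfosz) {
		/* Just copy back what the caller has room for. */
		cinfosz = read_size;
		rc = -E2BIG;
	}

	memcpy(dst, src, cinfosz);
	*copied = cinfosz;
	return rc;
}

int main(void)
{
	struct cgn_info_example info = { .crc = 0xdeadbeef };
	uint8_t out[sizeof(info)];
	size_t copied;
	int rc;

	memset(info.payload, 0xab, sizeof(info.payload));

	rc = copy_cgn_info(&info, out, 16, &copied);
	printf("rc=%d copied=%zu\n", rc, copied);   /* truncated: rc < 0, 16 bytes */

	rc = copy_cgn_info(&info, out, sizeof(out), &copied);
	printf("rc=%d copied=%zu\n", rc, copied);   /* full record: rc == 0, 60 bytes */
	return 0;
}
```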

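A second illustration, again not part of the patch: the lpfc_bsg_write_ebuf_set() hunk earlier in this diff defers the bsg_job_data allocation until the final external buffer has arrived and adds an explicit NULL-guarded mempool_free() of the mailbox object on the job_error path. The standalone sketch below shows the same allocate-late, unwind-in-reverse shape with a goto error label; the type names, allocators, and handle_ext_buffer() itself are invented for the example and are not taken from the driver.

```c
#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the tracking object and the mailbox command. */
struct tracking_data { int type; };
struct mbox_cmd      { char payload[64]; };

/*
 * Sketch of the "allocate only when the last buffer arrives, unwind in
 * reverse order on failure" shape used by the ext-buffer write path.
 * Returns 0 on success or a negative errno-style value on failure.
 */
static int handle_ext_buffer(int seq_num, int num_bufs)
{
	struct tracking_data *dd_data = NULL;
	struct mbox_cmd *pmboxq = NULL;
	int rc = 0;

	/* Intermediate buffers are only queued; nothing to allocate yet. */
	if (seq_num != num_bufs)
		return 0;

	/* Last buffer received: now allocate the tracking object ... */
	dd_data = calloc(1, sizeof(*dd_data));
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* ... and the mailbox command that will carry the request. */
	pmboxq = calloc(1, sizeof(*pmboxq));
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* In the driver, ownership of both objects passes to the completion
	 * handler once the command is issued.  This sketch has no completion
	 * handler, so release them before reporting success. */
	free(pmboxq);
	free(dd_data);
	return 0;

job_error:
	/* Free in reverse order of allocation; free(NULL) is a no-op, while
	 * the patch guards mempool_free() with an explicit NULL check. */
	free(pmboxq);
	free(dd_data);
	return rc;
}

int main(void)
{
	return handle_ext_buffer(3, 3) ? 1 : 0;
}
```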