Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 475
1 file changed, 254 insertions, 221 deletions
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 88760416a8cb..4cfc928bcf2d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * - * Fibre Channsel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> @@ -105,7 +105,7 @@ lpfc_nvmet_cmd_template(void) /* Word 9 - reqtag, rcvoxid is variable */ /* Word 10 - wqes, xc is variable */ - bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); @@ -153,7 +153,7 @@ lpfc_nvmet_cmd_template(void) /* Word 10 - xc is variable */ bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); - bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12); bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1); @@ -195,7 +195,7 @@ lpfc_nvmet_cmd_template(void) /* Word 10 wqes, xc is variable */ bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1); - bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); + bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG); bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0); bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE); @@ -285,7 +285,7 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, * transmission of an NVME LS response. * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. 
The function frees memory resources used for the command @@ -293,17 +293,18 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, **/ void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { - struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2; + struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp; uint32_t status, result; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6410 NVMEx LS cmpl state mismatch IO x%x: " "%d %d\n", axchg->oxid, axchg->state, axchg->entry_cnt); @@ -316,9 +317,9 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n", status, result, axchg->oxid); - lpfc_nlp_put(cmdwqe->context1); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + lpfc_nlp_put(cmdwqe->ndlp); + cmdwqe->context_un.axchg = NULL; + cmdwqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, cmdwqe); ls_rsp->done(ls_rsp); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, @@ -331,7 +332,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME LS commands @@ -340,15 +341,16 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, **/ static void lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { struct lpfc_nvmet_tgtport *tgtp; uint32_t status, result; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; if (!phba->targetport) goto finish; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -365,14 +367,13 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } finish: - __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe); + __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe); } /** * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context * @phba: HBA buffer is associated with - * @ctxp: context to clean up - * @mp: Buffer to free + * @ctx_buf: ctx buffer context * * Description: Frees the given DMA buffer in the appropriate way given by * reposting it to its associated RQ so it can be reused. 
@@ -395,7 +396,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) unsigned long iflag; if (ctxp->state == LPFC_NVME_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6411 NVMET free, already free IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } @@ -474,7 +475,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6181 Unable to queue deferred work " "for oxid x%x. " "FCP Drop IO [x%x x%x x%x]\n", @@ -708,7 +709,7 @@ out: * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME FCP commands @@ -716,17 +717,18 @@ out: **/ static void lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { struct lpfc_nvmet_tgtport *tgtp; struct nvmefc_tgt_fcp_req *rsp; struct lpfc_async_xchg_ctx *ctxp; - uint32_t status, result, op, start_clean, logerr; + uint32_t status, result, op, logerr; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS int id; #endif - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; ctxp->flag &= ~LPFC_NVME_IO_INP; rsp = &ctxp->hdlrctx.fcp_req; @@ -818,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ } else { ctxp->entry_cnt++; - start_clean = offsetof(struct lpfc_iocbq, iocb_flag); - memset(((char *)cmdwqe) + start_clean, 0, - (sizeof(struct lpfc_iocbq) - start_clean)); + memset_startat(cmdwqe, 0, cmd_flag); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (ctxp->ts_cmd_nvme) { ctxp->ts_isr_data = cmdwqe->isr_timestamp; @@ -863,7 +863,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, struct nvmefc_ls_rsp *ls_rsp, void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe)) + struct lpfc_iocbq *rspwqe)) { struct lpfc_hba *phba = axchg->phba; struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer; @@ -872,14 +872,14 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, struct ulp_bde64 bpl; int rc; - if (phba->pport->load_flag & FC_UNLOADING) + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) return -ENODEV; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid); if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6412 NVMEx LS rsp state mismatch " "oxid x%x: %d %d\n", axchg->oxid, axchg->state, axchg->entry_cnt); @@ -891,8 +891,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma, ls_rsp->rsplen); if (nvmewqeq == NULL) { - lpfc_printf_log(phba, KERN_ERR, - LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6150 NVMEx LS Drop Rsp x%x: Prep\n", axchg->oxid); 
rc = -ENOMEM; @@ -900,9 +899,9 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, } /* Save numBdes for bpl2sgl */ - nvmewqeq->rsvd2 = 1; + nvmewqeq->num_bdes = 1; nvmewqeq->hba_wqidx = 0; - nvmewqeq->context3 = &dmabuf; + nvmewqeq->bpl_dmabuf = &dmabuf; dmabuf.virt = &bpl; bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; @@ -915,9 +914,8 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, * be referenced after it returns back to this routine. */ - nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp; - nvmewqeq->iocb_cmpl = NULL; - nvmewqeq->context2 = axchg; + nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp; + nvmewqeq->context_un.axchg = axchg; lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n", axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen); @@ -925,7 +923,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq); /* clear to be sure there's no reference */ - nvmewqeq->context3 = NULL; + nvmewqeq->bpl_dmabuf = NULL; if (rc == WQE_SUCCESS) { /* @@ -936,14 +934,13 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg, return 0; } - lpfc_printf_log(phba, KERN_ERR, - LOG_NVME_DISC | LOG_NVME_IOERR | LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6151 NVMEx LS RSP x%x: failed to transmit %d\n", axchg->oxid, rc); rc = -ENXIO; - lpfc_nlp_put(nvmewqeq->context1); + lpfc_nlp_put(nvmewqeq->ndlp); out_free_buf: /* Give back resources */ @@ -987,7 +984,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, struct lpfc_nvmet_tgtport *nvmep = tgtport->private; int rc; - if (axchg->phba->pport->load_flag & FC_UNLOADING) + if (test_bit(FC_UNLOADING, &axchg->phba->pport->load_flag)) return -ENODEV; rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp); @@ -1025,7 +1022,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, int id; #endif - if (phba->pport->load_flag & FC_UNLOADING) { + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) { rc = -ENODEV; goto aerr; } @@ -1058,7 +1055,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, if ((ctxp->flag & LPFC_NVME_ABTS_RCV) || (ctxp->state == LPFC_NVME_STE_ABORT)) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6102 IO oxid x%x aborted\n", ctxp->oxid); rc = -ENXIO; @@ -1068,17 +1065,16 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); if (nvmewqeq == NULL) { atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6152 FCP Drop IO x%x: Prep\n", ctxp->oxid); rc = -ENXIO; goto aerr; } - nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; - nvmewqeq->iocb_cmpl = NULL; - nvmewqeq->context2 = ctxp; - nvmewqeq->iocb_flag |= LPFC_IO_NVMET; + nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; + nvmewqeq->context_un.axchg = ctxp; + nvmewqeq->cmd_flag |= LPFC_IO_NVMET; ctxp->wqeq->hba_wqidx = rsp->hwqid; lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", @@ -1116,13 +1112,13 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, /* Give back resources */ atomic_inc(&lpfc_nvmep->xmt_fcp_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6153 FCP Drop IO x%x: Issue: %d\n", ctxp->oxid, rc); ctxp->wqeq->hba_wqidx = 0; - nvmewqeq->context2 = NULL; - 
nvmewqeq->context3 = NULL; + nvmewqeq->context_un.axchg = NULL; + nvmewqeq->bpl_dmabuf = NULL; rc = -EBUSY; aerr: return rc; @@ -1149,7 +1145,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, struct lpfc_queue *wq; unsigned long flags; - if (phba->pport->load_flag & FC_UNLOADING) + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) return; if (!ctxp->hdwq) @@ -1216,7 +1212,7 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, ctxp->flag, ctxp->oxid); else if (ctxp->state != LPFC_NVME_STE_DONE && ctxp->state != LPFC_NVME_STE_ABORT) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6413 NVMET release bad state %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); @@ -1247,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, struct lpfc_nvmet_tgtport *tgtp; struct lpfc_async_xchg_ctx *ctxp = container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req); - struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct rqb_dmabuf *nvmebuf; struct lpfc_hba *phba = ctxp->phba; unsigned long iflag; @@ -1255,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", ctxp->oxid, ctxp->size, raw_smp_processor_id()); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + nvmebuf = ctxp->rqb_buffer; if (!nvmebuf) { + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6425 Defer rcv: no buffer oxid x%x: " "flg %x ste %x\n", ctxp->oxid, ctxp->flag, ctxp->state); return; } + ctxp->rqb_buffer = NULL; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); tgtp = phba->targetport->private; if (tgtp) @@ -1269,16 +1270,13 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, /* Free the nvmebuf since a new buffer already replaced it */ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); - spin_lock_irqsave(&ctxp->ctxlock, iflag); - ctxp->rqb_buffer = NULL; - spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } /** * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request * @phba: Pointer to HBA context object * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * This function is the completion handler for NVME LS requests. * The function updates any states and statistics, then calls the @@ -1286,17 +1284,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, **/ static void lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe); } /** * lpfc_nvmet_ls_req - Issue an Link Service request - * @targetport - pointer to target instance registered with nvmet transport. - * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv. + * @targetport: pointer to target instance registered with nvmet transport. + * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. * Driver sets this value to the ndlp pointer. - * @pnvme_lsreq - the transport nvme_ls_req structure for the LS + * @pnvme_lsreq: the transport nvme_ls_req structure for the LS * * Driver registers this routine to handle any link service request * from the nvme_fc transport to a remote nvme-aware port. 
@@ -1320,7 +1319,7 @@ lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport, return -EINVAL; phba = lpfc_nvmet->phba; - if (phba->pport->load_flag & FC_UNLOADING) + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) return -EINVAL; hstate = atomic_read(&lpfc_nvmet->state); @@ -1338,9 +1337,9 @@ lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport, /** * lpfc_nvmet_ls_abort - Abort a prior NVME LS request * @targetport: Transport targetport, that LS was issued from. - * @hosthandle - hosthandle set by the driver in a prior ls_rqst_rcv. + * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv. * Driver sets this value to the ndlp pointer. - * @pnvme_lsreq - the transport nvme_ls_req structure for LS to be aborted + * @pnvme_lsreq: the transport nvme_ls_req structure for LS to be aborted * * Driver registers this routine to abort an NVME LS request that is * in progress (from the transports perspective). @@ -1356,7 +1355,7 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, int ret; phba = lpfc_nvmet->phba; - if (phba->pport->load_flag & FC_UNLOADING) + if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) return; ndlp = (struct lpfc_nodelist *)hosthandle; @@ -1366,21 +1365,36 @@ lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport, atomic_inc(&lpfc_nvmet->xmt_ls_abort); } +static int +lpfc_nvmet_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn) +{ + struct lpfc_nodelist *ndlp = hosthandle; + + *wwnn = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + *wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn); + return 0; +} + static void lpfc_nvmet_host_release(void *hosthandle) { struct lpfc_nodelist *ndlp = hosthandle; - struct lpfc_hba *phba = NULL; + struct lpfc_hba *phba = ndlp->phba; struct lpfc_nvmet_tgtport *tgtp; - phba = ndlp->phba; if (!phba->targetport || !phba->targetport->private) return; lpfc_printf_log(phba, KERN_ERR, LOG_NVME, - "6202 NVMET XPT releasing hosthandle x%px\n", - hosthandle); + "6202 NVMET XPT releasing hosthandle x%px " + "DID x%x xflags x%x refcnt %d\n", + hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags, + kref_read(&ndlp->kref)); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + spin_lock_irq(&ndlp->lock); + ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH; + spin_unlock_irq(&ndlp->lock); + lpfc_nlp_put(ndlp); atomic_set(&tgtp->state, 0); } @@ -1395,7 +1409,7 @@ lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport) phba = tgtp->phba; rc = lpfc_issue_els_rscn(phba->pport, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6420 NVMET subsystem change: Notification %s\n", (rc) ? 
"Failed" : "Sent"); } @@ -1411,6 +1425,7 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = { .ls_req = lpfc_nvmet_ls_req, .ls_abort = lpfc_nvmet_ls_abort, .host_release = lpfc_nvmet_host_release, + .host_traddr = lpfc_nvmet_host_traddr, .max_hw_queues = 1, .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, @@ -1438,7 +1453,10 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, list_del_init(&ctx_buf->list); spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_lock(&phba->hbalock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); + spin_unlock(&phba->hbalock); + ctx_buf->sglq->state = SGL_FREED; ctx_buf->sglq->ndlp = NULL; @@ -1464,7 +1482,7 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) if (!infop) return; - /* Cycle the the entire CPU context list for every MRQ */ + /* Cycle the entire CPU context list for every MRQ */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { for_each_present_cpu(j) { infop = lpfc_get_ctx_list(phba, j, i); @@ -1493,7 +1511,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); if (!phba->sli4_hba.nvmet_ctx_info) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6419 Failed allocate memory for " "nvmet context lists\n"); return -ENOMEM; @@ -1551,7 +1569,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6404 Ran out of memory for NVMET\n"); return -ENOMEM; } @@ -1560,7 +1578,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) GFP_KERNEL); if (!ctx_buf->context) { kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6405 Ran out of NVMET " "context memory\n"); return -ENOMEM; @@ -1572,18 +1590,18 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) if (!ctx_buf->iocbq) { kfree(ctx_buf->context); kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6406 Ran out of NVMET iocb/WQEs\n"); return -ENOMEM; } - ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET; + ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET; nvmewqe = ctx_buf->iocbq; wqe = &nvmewqe->wqe; /* Initialize WQE */ - memset(wqe, 0, sizeof(union lpfc_wqe)); + memset(wqe, 0, sizeof(*wqe)); - ctx_buf->iocbq->context1 = NULL; + ctx_buf->iocbq->cmd_dmabuf = NULL; spin_lock(&phba->sli4_hba.sgl_list_lock); ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); spin_unlock(&phba->sli4_hba.sgl_list_lock); @@ -1591,7 +1609,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); kfree(ctx_buf->context); kfree(ctx_buf); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } @@ -1615,10 +1633,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) cpu = cpumask_first(cpu_present_mask); continue; } - cpu = cpumask_next(cpu, cpu_present_mask); - if (cpu == nr_cpu_ids) - cpu = cpumask_first(cpu_present_mask); - + cpu = lpfc_next_present_cpu(cpu); } for_each_present_cpu(i) { @@ -1670,7 +1685,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) error = -ENOENT; #endif if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, 
LOG_TRACE_EVENT, "6025 Cannot register NVME targetport x%x: " "portnm %llx nodenm %llx segs %d qs %d\n", error, @@ -1785,31 +1800,35 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe); } - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; - spin_lock(&ctxp->ctxlock); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); /* Check if we already received a free context call * and we have completed processing an abort situation. */ if (ctxp->flag & LPFC_NVME_CTX_RLS && !(ctxp->flag & LPFC_NVME_ABORT_OP)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVME_XBUSY; - spin_unlock(&ctxp->ctxlock); - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + spin_lock_irqsave(&phba->rrq_list_lock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); - spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_unlock_irqrestore(&phba->rrq_list_lock, iflag); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + if (ndlp && (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { lpfc_set_rrq_active(phba, ndlp, @@ -1828,9 +1847,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, lpfc_worker_wake_up(phba); return; } - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri); if (ctxp) { /* @@ -1874,8 +1891,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, sid = sli4_sid_from_fc_hdr(fc_hdr); oxid = be16_to_cpu(fc_hdr->fh_ox_id); - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { @@ -1884,9 +1900,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, xri = ctxp->ctxbuf->sglq->sli4_xritag; - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); spin_lock_irqsave(&ctxp->ctxlock, iflag); ctxp->flag |= LPFC_NVME_ABTS_RCV; spin_unlock_irqrestore(&ctxp->ctxlock, iflag); @@ -1905,9 +1920,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } - spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag); /* check the wait list */ if (phba->sli4_hba.nvmet_io_wait_cnt) { struct rqb_dmabuf *nvmebuf; @@ -2022,12 +2035,14 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, &wq->wqfull_list, list) { if (ctxp) { /* Checking for a specific IO to flush */ - if (nvmewqeq->context2 == ctxp) { + if (nvmewqeq->context_un.axchg == ctxp) { list_del(&nvmewqeq->list); spin_unlock_irqrestore(&pring->ring_lock, 
iflags); + memcpy(&nvmewqeq->wcqe_cmpl, wcqep, + sizeof(*wcqep)); lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, - wcqep); + nvmewqeq); return; } continue; @@ -2035,7 +2050,8 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, /* Flush all IOs */ list_del(&nvmewqeq->list); spin_unlock_irqrestore(&pring->ring_lock, iflags); - lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep); + memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep)); + lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); } } @@ -2065,7 +2081,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, list); spin_unlock_irqrestore(&pring->ring_lock, iflags); - ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2; + ctxp = nvmewqeq->context_un.axchg; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); if (rc == -EBUSY) { @@ -2112,9 +2128,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6179 Unreg targetport x%px timeout " "reached.\n", phba->targetport); lpfc_nvmet_cleanup_io_context(phba); @@ -2128,7 +2144,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * @axchg: pointer to exchange context for the NVME LS request * - * This routine is used for processing an asychronously received NVME LS + * This routine is used for processing an asynchronously received NVME LS * request. Any remaining validation is done and the LS is then forwarded * to the nvmet-fc transport via nvmet_fc_rcv_ls_req(). 
* @@ -2187,7 +2203,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) unsigned long iflags; if (!nvmebuf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6159 process_rcv_fcp_req, nvmebuf is NULL, " "oxid: x%x flg: x%x state: x%x\n", ctxp->oxid, ctxp->flag, ctxp->state); @@ -2200,7 +2216,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) } if (ctxp->flag & LPFC_NVME_ABTS_RCV) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6324 IO oxid x%x aborted\n", ctxp->oxid); return; @@ -2264,7 +2280,7 @@ lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) } ctxp->flag &= ~LPFC_NVME_TNOTIFY; atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", ctxp->oxid, rc, atomic_read(&tgtp->rcv_fcp_cmd_in), @@ -2383,7 +2399,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctx_buf = NULL; if (!nvmebuf || !phba->targetport) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6157 NVMET FCP Drop IO\n"); if (nvmebuf) lpfc_rq_buf_free(phba, &nvmebuf->hbuf); @@ -2456,7 +2472,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list); spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag); if (ctxp->state != LPFC_NVME_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6414 NVMET Context corrupt %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); } @@ -2498,7 +2514,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, if (!queue_work(phba->wq, &ctx_buf->defer_work)) { atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6325 Unable to queue work for oxid x%x. 
" "FCP Drop IO [x%x x%x x%x]\n", ctxp->oxid, @@ -2535,7 +2551,7 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint8_t cqflag) { if (!nvmebuf) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3167 NVMET FCP Drop IO\n"); return; } @@ -2581,7 +2597,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, union lpfc_wqe128 *wqe; if (!lpfc_is_link_up(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6104 NVMET prep LS wqe: link err: " "NPORT x%x oxid:x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2591,7 +2607,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, /* Allocate buffer for command wqe */ nvmewqe = lpfc_sli_get_iocbq(phba); if (nvmewqe == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6105 NVMET prep LS wqe: No WQE: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2599,10 +2615,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, } ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6106 NVMET prep LS wqe: No ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2611,10 +2627,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, ctxp->wqeq = nvmewqe; /* prevent preparing wqe with NULL ndlp reference */ - nvmewqe->context1 = lpfc_nlp_get(ndlp); - if (nvmewqe->context1 == NULL) + nvmewqe->ndlp = lpfc_nlp_get(ndlp); + if (!nvmewqe->ndlp) goto nvme_wqe_free_wqeq_exit; - nvmewqe->context2 = ctxp; + nvmewqe->context_un.axchg = ctxp; wqe = &nvmewqe->wqe; memset(wqe, 0, sizeof(union lpfc_wqe)); @@ -2675,7 +2691,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, nvmewqe->retry = 1; nvmewqe->vport = phba->pport; nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; - nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; + nvmewqe->cmd_flag |= LPFC_IO_NVME_LS; /* Xmit NVMET response to remote NPORT <did> */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, @@ -2686,8 +2702,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, return nvmewqe; nvme_wqe_free_wqeq_exit: - nvmewqe->context2 = NULL; - nvmewqe->context3 = NULL; + nvmewqe->context_un.axchg = NULL; + nvmewqe->ndlp = NULL; + nvmewqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, nvmewqe); return NULL; } @@ -2707,11 +2724,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, struct ulp_bde64 *bde; dma_addr_t physaddr; int i, cnt, nsegs; - int do_pbde; + bool use_pbde = false; int xc = 1; if (!lpfc_is_link_up(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6107 NVMET prep FCP wqe: link err:" "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2719,10 +2736,10 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, } ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6108 NVMET prep FCP wqe: no ndlp: " "NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2730,7 +2747,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, } if (rsp->sg_cnt > 
lpfc_tgttemplate.max_sgl_segments) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6109 NVMET prep FCP wqe: seg cnt err: " "NPORT x%x oxid x%x ste %d cnt %d\n", ctxp->sid, ctxp->oxid, ctxp->state, @@ -2745,7 +2762,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* Allocate buffer for command wqe */ nvmewqe = ctxp->ctxbuf->iocbq; if (nvmewqe == NULL) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6110 NVMET prep FCP wqe: No " "WQE: NPORT x%x oxid x%x ste %d\n", ctxp->sid, ctxp->oxid, ctxp->state); @@ -2763,7 +2780,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, (ctxp->state == LPFC_NVME_STE_DATA)) { wqe = &nvmewqe->wqe; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6111 Wrong state NVMET FCP: %d cnt %d\n", ctxp->state, ctxp->entry_cnt); return NULL; @@ -2815,9 +2832,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (!xc) bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0); - /* Word 11 - set sup, irsp, irsplen later */ - do_pbde = 0; - /* Word 12 */ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; @@ -2842,7 +2856,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { - if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag)) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { @@ -2895,12 +2909,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, if (!xc) bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); - /* Word 11 - set pbde later */ - if (phba->cfg_enable_pbde) { - do_pbde = 1; + /* Word 11 - check for pbde */ + if (nsegs == 1 && phba->cfg_enable_pbde) { + use_pbde = true; + /* Word 11 - PBDE bit already preset by template */ } else { + /* Overwrite default template setting */ bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); - do_pbde = 0; } /* Word 12 */ @@ -2971,7 +2986,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, ((rsp->rsplen >> 2) - 1)); memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); } - do_pbde = 0; /* Word 12 */ wqe->fcp_trsp.rsvd_12_15[0] = 0; @@ -2992,7 +3006,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, nvmewqe->retry = 1; nvmewqe->vport = phba->pport; nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; - nvmewqe->context1 = ndlp; + nvmewqe->ndlp = ndlp; for_each_sg(rsp->sg, sgel, nsegs, i) { physaddr = sg_dma_address(sgel); @@ -3006,23 +3020,24 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, bf_set(lpfc_sli4_sge_last, sgl, 1); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(cnt); - if (i == 0) { - bde = (struct ulp_bde64 *)&wqe->words[13]; - if (do_pbde) { - /* Words 13-15 (PBDE) */ - bde->addrLow = sgl->addr_lo; - bde->addrHigh = sgl->addr_hi; - bde->tus.f.bdeSize = - le32_to_cpu(sgl->sge_len); - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bde->tus.w = cpu_to_le32(bde->tus.w); - } else { - memset(bde, 0, sizeof(struct ulp_bde64)); - } - } sgl++; ctxp->offset += cnt; } + + bde = (struct ulp_bde64 *)&wqe->words[13]; + if (use_pbde) { + /* decrement sgl ptr backwards once to first data sge */ + sgl--; + + /* Words 13-15 (PBDE) */ + bde->addrLow = sgl->addr_lo; + bde->addrHigh = sgl->addr_hi; + bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } else { + memset(bde, 0, sizeof(struct ulp_bde64)); + } ctxp->state = LPFC_NVME_STE_DATA; 
ctxp->entry_cnt++; return nvmewqe; @@ -3032,7 +3047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME ABTS for FCP cmds @@ -3040,15 +3055,16 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, **/ static void lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; uint32_t result; unsigned long flags; bool released = false; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3079,8 +3095,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. @@ -3101,7 +3117,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. + * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME ABTS for FCP cmds @@ -3109,15 +3125,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, **/ static void lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; unsigned long flags; uint32_t result; bool released = false; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; if (!ctxp) { @@ -3136,7 +3153,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, /* Sanity check */ if (ctxp->state != LPFC_NVME_STE_ABORT) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6112 ABTS Wrong state:%d oxid x%x\n", ctxp->state, ctxp->oxid); } @@ -3163,8 +3180,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; /* * if transport has released ctx, then can reuse it. Otherwise, * will be recycled by transport release call. @@ -3182,7 +3199,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS * @phba: Pointer to HBA context object. * @cmdwqe: Pointer to driver command WQE object. - * @wcqe: Pointer to driver response CQE object. 
+ * @rspwqe: Pointer to driver response WQE object. * * The function is called from SLI ring event handler with no * lock held. This function is the completion handler for NVME ABTS for LS cmds @@ -3190,13 +3207,14 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, **/ static void lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, - struct lpfc_wcqe_complete *wcqe) + struct lpfc_iocbq *rspwqe) { struct lpfc_async_xchg_ctx *ctxp; struct lpfc_nvmet_tgtport *tgtp; uint32_t result; + struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - ctxp = cmdwqe->context2; + ctxp = cmdwqe->context_un.axchg; result = wcqe->parameter; if (phba->nvmet_support) { @@ -3210,7 +3228,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, result, wcqe->word3); if (!ctxp) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6415 NVMET LS Abort No ctx: WCQE: " "%08x %08x %08x %08x\n", wcqe->word0, wcqe->total_data_placed, @@ -3221,14 +3239,14 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } if (ctxp->state != LPFC_NVME_STE_LS_ABORT) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6416 NVMET LS abort cmpl state mismatch: " "oxid x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } - cmdwqe->context2 = NULL; - cmdwqe->context3 = NULL; + cmdwqe->rsp_dmabuf = NULL; + cmdwqe->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, cmdwqe); kfree(ctxp); } @@ -3251,12 +3269,12 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; ndlp = lpfc_findnode_did(phba->pport, sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { if (tgtp) atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6134 Drop ABTS - wrong NDLP state x%x.\n", (ndlp) ? 
ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -3302,7 +3320,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); /* Word 10 */ - bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_LENLOC_WORD12); @@ -3316,10 +3333,10 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, OTHER_COMMAND); abts_wqeq->vport = phba->pport; - abts_wqeq->context1 = ndlp; - abts_wqeq->context2 = ctxp; - abts_wqeq->context3 = NULL; - abts_wqeq->rsvd2 = 0; + abts_wqeq->ndlp = ndlp; + abts_wqeq->context_un.axchg = ctxp; + abts_wqeq->bpl_dmabuf = NULL; + abts_wqeq->num_bdes = 0; /* hba_wqidx should already be setup from command we are aborting */ abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; abts_wqeq->iocb.ulpLe = 1; @@ -3339,7 +3356,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_iocbq *abts_wqeq; struct lpfc_nodelist *ndlp; unsigned long flags; - u8 opt; + bool ia; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3349,11 +3366,11 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, } ndlp = lpfc_findnode_did(phba->pport, sid); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || + if (!ndlp || ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6160 Drop ABORT - wrong NDLP state x%x.\n", (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); @@ -3369,7 +3386,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, spin_lock_irqsave(&ctxp->ctxlock, flags); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6161 ABORT failed: No wqeqs: " "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. */ @@ -3379,7 +3396,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, } abts_wqeq = ctxp->abort_wqeq; ctxp->state = LPFC_NVME_STE_ABORT; - opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0; + ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false; spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. */ @@ -3391,14 +3408,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* If the hba is getting reset, this flag is set. It is * cleared when the reset is complete and rings reestablished. */ - spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, flags); + if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) { atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6163 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x oxid x%x\n", + "NVME Req now. 
hba_flag x%lx oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); spin_lock_irqsave(&ctxp->ctxlock, flags); @@ -3407,11 +3422,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 0; } + spin_lock_irqsave(&phba->hbalock, flags); /* Outstanding abort is in progress */ - if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { + if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6164 Outstanding NVME I/O Abort Request " "still pending on oxid x%x\n", ctxp->oxid); @@ -3423,16 +3439,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, } /* Ready - mark outstanding as aborted by driver. */ - abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; + abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED; - lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt); + lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag, + abts_wqeq->iotag, CLASS3, + LPFC_WQE_CQ_ID_DEFAULT, ia, true); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; - abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; - abts_wqeq->iocb_cmpl = NULL; - abts_wqeq->iocb_flag |= LPFC_IO_NVME; - abts_wqeq->context2 = ctxp; + abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVME; + abts_wqeq->context_un.axchg = ctxp; abts_wqeq->vport = phba->pport; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; @@ -3449,7 +3466,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, ctxp->flag &= ~LPFC_NVME_ABORT_OP; spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_sli_release_iocbq(phba, abts_wqeq); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6166 Failed ABORT issue_wqe with status x%x " "for oxid x%x.\n", rc, ctxp->oxid); @@ -3474,7 +3491,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, } if (ctxp->state == LPFC_NVME_STE_FREE) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); rc = WQE_BUSY; @@ -3488,9 +3505,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, spin_lock_irqsave(&phba->hbalock, flags); abts_wqeq = ctxp->wqeq; - abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; - abts_wqeq->iocb_cmpl = NULL; - abts_wqeq->iocb_flag |= LPFC_IO_NVMET; + abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVMET; if (!ctxp->hdwq) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; @@ -3512,7 +3528,7 @@ aerr: spin_unlock_irqrestore(&ctxp->ctxlock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6135 Failed to Issue ABTS for oxid x%x. 
Status x%x " "(%x)\n", ctxp->oxid, rc, released); @@ -3544,7 +3560,7 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, ctxp->state = LPFC_NVME_STE_LS_ABORT; ctxp->entry_cnt++; } else { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6418 NVMET LS abort state mismatch " "IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); @@ -3558,7 +3574,7 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, /* Issue ABTS for this WQE based on iotag */ ctxp->wqeq = lpfc_sli_get_iocbq(phba); if (!ctxp->wqeq) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6068 Abort failed: No wqeqs: " "xri: x%x\n", xri); /* No failure to an ABTS request. */ @@ -3574,9 +3590,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, } spin_lock_irqsave(&phba->hbalock, flags); - abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; - abts_wqeq->iocb_cmpl = NULL; - abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; + abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; + abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { @@ -3587,10 +3602,10 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba, out: if (tgtp) atomic_inc(&tgtp->xmt_abort_rsp_error); - abts_wqeq->context2 = NULL; - abts_wqeq->context3 = NULL; + abts_wqeq->rsp_dmabuf = NULL; + abts_wqeq->bpl_dmabuf = NULL; lpfc_sli_release_iocbq(phba, abts_wqeq); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6056 Failed to Issue ABTS. Status x%x\n", rc); return 1; } @@ -3598,8 +3613,8 @@ out: /** * lpfc_nvmet_invalidate_host * - * @phba - pointer to the driver instance bound to an adapter port. - * @ndlp - pointer to an lpfc_nodelist type + * @phba: pointer to the driver instance bound to an adapter port. + * @ndlp: pointer to an lpfc_nodelist type * * This routine upcalls the nvmet transport to invalidate an NVME * host to which this target instance had active connections. @@ -3607,15 +3622,33 @@ out: void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { + u32 ndlp_has_hh; struct lpfc_nvmet_tgtport *tgtp; - lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS, + lpfc_printf_log(phba, KERN_INFO, + LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, "6203 Invalidating hosthandle x%px\n", ndlp); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE); + spin_lock_irq(&ndlp->lock); + ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH; + spin_unlock_irq(&ndlp->lock); + + /* Do not invalidate any nodes that do not have a hosthandle. + * The host_release callbk will cause a node reference + * count imbalance and a crash. + */ + if (!ndlp_has_hh) { + lpfc_printf_log(phba, KERN_INFO, + LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC, + "6204 Skip invalidate on node x%px DID x%x\n", + ndlp, ndlp->nlp_DID); + return; + } + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) /* Need to get the nvmet_fc_target_port pointer here.*/ nvmet_fc_invalidate_host(phba->targetport, ndlp); |
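
One recurring cleanup in this diff replaces the old offsetof()-plus-memset() idiom in lpfc_nvmet_xmt_fcp_op_cmp() with memset_startat(), which clears a structure from a named member to the end of the object. The sketch below is a minimal, self-contained userspace illustration of that idiom, not driver code: the struct req type and its fields are invented for the example, and the local macro only mirrors the semantics of the kernel's memset_startat() from <linux/string.h>.

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Local stand-in mirroring the kernel's memset_startat() semantics:
 * clear everything from 'member' to the end of the object that 'obj'
 * points to. (The real macro lives in <linux/string.h>.)
 */
#define memset_startat(obj, v, member) \
	memset((char *)(obj) + offsetof(typeof(*(obj)), member), (v), \
	       sizeof(*(obj)) - offsetof(typeof(*(obj)), member))

/* Hypothetical request object: fields before 'flags' must survive
 * reuse, everything from 'flags' onward must be reset.
 */
struct req {
	int tag;        /* preserved across reuse */
	int hw_queue;   /* preserved across reuse */
	unsigned flags; /* first field to be cleared */
	void *ctx;
	char payload[32];
};

int main(void)
{
	struct req r = { .tag = 7, .hw_queue = 2, .flags = 0xff, .ctx = &r };

	/* Old style, as removed by the patch:
	 *   size_t start = offsetof(struct req, flags);
	 *   memset((char *)&r + start, 0, sizeof(r) - start);
	 */
	memset_startat(&r, 0, flags);

	printf("tag=%d hw_queue=%d flags=%u ctx=%p\n",
	       r.tag, r.hw_queue, r.flags, r.ctx);
	return 0;
}
```

The point of clearing only from cmd_flag onward (rather than the whole iocbq) is to keep the bookkeeping fields that precede it intact while the command object is reused for the next WQE.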
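
The diff also adds a host_traddr callback, lpfc_nvmet_host_traddr(), which reports the remote port's WWNN/WWPN as 64-bit values via wwn_to_u64(). As a rough, standalone illustration of that conversion (not the driver's code), the snippet below assembles the u64 from an 8-byte WWN stored in big-endian order, which is what the kernel helper in <scsi/scsi_transport_fc.h> effectively does; the sample WWPN value is made up.

```c
#include <stdint.h>
#include <stdio.h>

/* Build a 64-bit value from an 8-byte Fibre Channel WWN stored
 * big-endian (same result as the kernel's wwn_to_u64(), which is
 * essentially a big-endian 64-bit load).
 */
static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	/* Example WWPN 10:00:00:90:fa:12:34:56 (made-up value). */
	const uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x90,
				  0xfa, 0x12, 0x34, 0x56 };

	printf("wwpn as u64: 0x%016llx\n",
	       (unsigned long long)wwn_to_u64(wwpn));
	return 0;
}
```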
