Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
| -rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.c | 256 |
1 file changed, 137 insertions, 119 deletions
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a62e091894f6..6d9d8c196936 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -25,7 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <linux/delay.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/t10-pi.h>
 #include <linux/crc-t10dif.h>
 #include <linux/blk-cgroup.h>
@@ -109,8 +109,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
 	}
 }
 
-#define LPFC_INVALID_REFTAG ((u32)-1)
-
 /**
  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
  * @phba: The Hba for which this call is being executed.
@@ -169,11 +167,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	struct Scsi_Host *shost;
 	struct scsi_device *sdev;
 	unsigned long new_queue_depth;
-	unsigned long num_rsrc_err, num_cmd_success;
+	unsigned long num_rsrc_err;
 	int i;
 
 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
-	num_cmd_success = atomic_read(&phba->num_cmd_success);
 
 	/*
 	 * The error and success command counters are global per
@@ -188,20 +185,16 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		shost_for_each_device(sdev, shost) {
-			new_queue_depth =
-				sdev->queue_depth * num_rsrc_err /
-				(num_rsrc_err + num_cmd_success);
-			if (!new_queue_depth)
-				new_queue_depth = sdev->queue_depth - 1;
+			if (num_rsrc_err >= sdev->queue_depth)
+				new_queue_depth = 1;
 			else
 				new_queue_depth = sdev->queue_depth -
-						new_queue_depth;
+						num_rsrc_err;
 			scsi_change_queue_depth(sdev, new_queue_depth);
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
 	atomic_set(&phba->num_rsrc_err, 0);
-	atomic_set(&phba->num_cmd_success, 0);
 }
 
 /**
@@ -397,6 +390,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;
 
+	/* may be called before queues established if hba_setup fails */
+	if (!phba->sli4_hba.hdwq)
+		return;
+
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 		qp = &phba->sli4_hba.hdwq[idx];
@@ -481,9 +478,11 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
 				ndlp = psb->rdata->pnode;
 			else
 				ndlp = NULL;
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 
+			spin_lock_irqsave(&phba->rrq_list_lock, iflag);
 			rrq_empty = list_empty(&phba->active_rrq_list);
-			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
 			if (ndlp && !offline) {
 				lpfc_set_rrq_active(phba, ndlp,
 					psb->cur_iocbq.sli4_lxritag, rxid, 1);
@@ -537,7 +536,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
 		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
 		psb->flags &= ~LPFC_SBUF_XBUSY;
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		if (!list_empty(&pring->txq))
+		if (test_bit(HBA_SETUP, &phba->hba_flag) &&
+		    !list_empty(&pring->txq))
 			lpfc_worker_wake_up(phba);
 		return;
 	}
@@ -605,7 +605,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 {
 	struct lpfc_io_buf *lpfc_cmd;
 	struct lpfc_sli4_hdw_queue *qp;
-	struct sli4_sge *sgl;
+	struct sli4_sge_le *sgl;
 	dma_addr_t pdma_phys_fcp_rsp;
 	dma_addr_t pdma_phys_fcp_cmd;
 	uint32_t cpu, idx;
@@ -656,23 +656,23 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	 * The balance are sg list bdes. Initialize the
 	 * first two and leave the rest for queuecommand.
 	 */
-	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
+	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
 	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
 	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
 	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
-	sgl->word2 = le32_to_cpu(sgl->word2);
-	bf_set(lpfc_sli4_sge_last, sgl, 0);
-	sgl->word2 = cpu_to_le32(sgl->word2);
-	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+	bf_set_le32(lpfc_sli4_sge_last, sgl, 0);
+	if (cmnd && cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd32));
+	else
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+	sgl++;
 
 	/* Setup the physical region for the FCP RSP */
-	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd32);
 	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
 	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
-	sgl->word2 = le32_to_cpu(sgl->word2);
-	bf_set(lpfc_sli4_sge_last, sgl, 1);
-	sgl->word2 = cpu_to_le32(sgl->word2);
+	bf_set_le32(lpfc_sli4_sge_last, sgl, 1);
 	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
 
 	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -978,8 +978,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	sgpe = scsi_prot_sglist(sc);
 	lba = scsi_prot_ref_tag(sc);
-	if (lba == LPFC_INVALID_REFTAG)
-		return 0;
 
 	/* First check if we need to match the LBA */
 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
@@ -1560,8 +1558,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	/* extract some info from the scsi command for pde*/
 	reftag = scsi_prot_ref_tag(sc);
-	if (reftag == LPFC_INVALID_REFTAG)
-		goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1723,8 +1719,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	/* extract some info from the scsi command */
 	blksize = scsi_prot_interval(sc);
 	reftag = scsi_prot_ref_tag(sc);
-	if (reftag == LPFC_INVALID_REFTAG)
-		goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -1926,7 +1920,7 @@ out:
  *
  * Returns the number of SGEs added to the SGL.
  **/
-static int
+static uint32_t
 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct sli4_sge *sgl, int datasegcnt,
 		struct lpfc_io_buf *lpfc_cmd)
@@ -1934,8 +1928,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	struct scatterlist *sgde = NULL; /* s/g data entry */
 	struct sli4_sge_diseed *diseed = NULL;
 	dma_addr_t physaddr;
-	int i = 0, num_sge = 0, status;
-	uint32_t reftag;
+	int i = 0, status;
+	uint32_t reftag, num_sge = 0;
 	uint8_t txop, rxop;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
@@ -1953,8 +1947,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	/* extract some info from the scsi command for pde*/
 	reftag = scsi_prot_ref_tag(sc);
-	if (reftag == LPFC_INVALID_REFTAG)
-		goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2109,7 +2101,7 @@ out:
  *
 * Returns the number of SGEs added to the SGL.
 **/
-static int
+static uint32_t
 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct sli4_sge *sgl, int datacnt, int protcnt,
 		struct lpfc_io_buf *lpfc_cmd)
@@ -2133,8 +2125,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	uint32_t rc;
 #endif
 	uint32_t checking = 1;
-	uint32_t dma_offset = 0;
-	int num_sge = 0, j = 2;
+	uint32_t dma_offset = 0, num_sge = 0;
+	int j = 2;
 	struct sli4_hybrid_sgl *sgl_xtra = NULL;
 
 	sgpe = scsi_prot_sglist(sc);
@@ -2154,8 +2146,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	/* extract some info from the scsi command */
 	blksize = scsi_prot_interval(sc);
 	reftag = scsi_prot_ref_tag(sc);
-	if (reftag == LPFC_INVALID_REFTAG)
-		goto out;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
@@ -2623,7 +2613,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 	iocb_cmd->ulpLe = 1;
 
 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
 
 	/*
 	 * Due to difference in data length between DIF/non-DIF paths,
@@ -2740,16 +2730,14 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 		sgde = scsi_sglist(cmd);
 		blksize = scsi_prot_interval(cmd);
 		data_src = (uint8_t *)sg_virt(sgde);
-		data_len = sgde->length;
+		data_len = sg_dma_len(sgde);
 		if ((data_len & (blksize - 1)) == 0)
 			chk_guard = 1;
 
 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
 		start_ref_tag = scsi_prot_ref_tag(cmd);
-		if (start_ref_tag == LPFC_INVALID_REFTAG)
-			goto out;
 		start_app_tag = src->app_tag;
-		len = sgpe->length;
+		len = sg_dma_len(sgpe);
 		while (src && protsegcnt) {
 			while (len) {
 
@@ -2814,7 +2802,7 @@ skipit:
 				goto out;
 
 			data_src = (uint8_t *)sg_virt(sgde);
-			data_len = sgde->length;
+			data_len = sg_dma_len(sgde);
 			if ((data_len & (blksize - 1)) == 0)
 				chk_guard = 1;
 		}
@@ -2824,7 +2812,7 @@ skipit:
 		sgpe = sg_next(sgpe);
 		if (sgpe) {
 			src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-			len = sgpe->length;
+			len = sg_dma_len(sgpe);
 		} else {
 			src = NULL;
 		}
@@ -3242,14 +3230,18 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
 	 * explicitly reinitialized.
 	 * all iocb memory resources are reused.
 	 */
-	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+	if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+		((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl =
+				cpu_to_be32(scsi_bufflen(scsi_cmnd));
+	else
+		fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
 
 	/* Set first-burst provided it was successfully negotiated */
-	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
 	    vport->cfg_first_burst_size &&
 	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
 		u32 init_len, total_len;
 
-		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+		total_len = scsi_bufflen(scsi_cmnd);
 		init_len = min(total_len, vport->cfg_first_burst_size);
 
 		/* Word 4 & 5 */
@@ -3437,15 +3429,18 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 	}
 
 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+	if (lpfc_cmd->pCmd->cmd_len > LPFC_FCP_CDB_LEN)
+		((struct fcp_cmnd32 *)fcp_cmnd)->fcpDl = cpu_to_be32(fcpdl);
+	else
+		fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
 
 	/* Set first-burst provided it was successfully negotiated */
-	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
 	    vport->cfg_first_burst_size &&
 	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
 		u32 init_len, total_len;
 
-		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
+		total_len = fcpdl;
 		init_len = min(total_len, vport->cfg_first_burst_size);
 
 		/* Word 4 & 5 */
@@ -3453,8 +3448,7 @@
 		wqe->fcp_iwrite.total_xfer_len = total_len;
 	} else {
 		/* Word 4 */
-		wqe->fcp_iwrite.total_xfer_len =
-			be32_to_cpu(fcp_cmnd->fcpDl);
+		wqe->fcp_iwrite.total_xfer_len = fcpdl;
 	}
 
 	/*
@@ -3493,11 +3487,11 @@ err:
 			     scsi_cmnd->sc_data_direction);
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-			"9084 Cannot setup S/G List for HBA"
-			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+			"9084 Cannot setup S/G List for HBA "
+			"IO segs %d/%d SGL %d SCSI %d: %d %d %d\n",
 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
-			prot_group_type, num_sge);
+			prot_group_type, num_sge, ret);
 
 	lpfc_cmd->seg_cnt = 0;
 	lpfc_cmd->prot_seg_cnt = 0;
@@ -3911,7 +3905,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
 			 fcprsp->rspInfo3);
 
 	scsi_set_resid(cmnd, 0);
-	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
+	if (cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+		fcpDl = be32_to_cpu(((struct fcp_cmnd32 *)fcpcmd)->fcpDl);
+	else
+		fcpDl = be32_to_cpu(fcpcmd->fcpDl);
 	if (resp_info & RESID_UNDER) {
 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
 
@@ -4637,7 +4634,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
 			if (vport->cfg_first_burst_size &&
-			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
+			    test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
 				u32 xrdy_len;
 
 				fcpdl = scsi_bufflen(scsi_cmnd);
@@ -4740,6 +4737,14 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
 			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
 			       LPFC_WQE_IOD_NONE);
 		}
+
+		/* Additional fcp cdb length field calculation.
+		 * LPFC_FCP_CDB_LEN_32 - normal 16 byte cdb length,
+		 * then divide by 4 for the word count.
+		 * shift 2 because of the RDDATA/WRDATA.
+		 */
+		if (scsi_cmnd->cmd_len > LPFC_FCP_CDB_LEN)
+			fcp_cmnd->fcpCntl3 |= 4 << 2;
 	} else {
 		/* From the icmnd template, initialize words 4 - 11 */
 		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
@@ -4815,7 +4820,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
 		       &lpfc_cmd->fcp_cmnd->fcp_lun);
 
-	ptr = &fcp_cmnd->fcpCdb[0];
+	ptr = &((struct fcp_cmnd32 *)fcp_cmnd)->fcpCdb[0];
 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
 		ptr += scsi_cmnd->cmd_len;
@@ -5060,7 +5065,7 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba)
 
 		/* Check for valid Emulex Device ID */
 		if (phba->sli_rev != LPFC_SLI_REV4 ||
-		    phba->hba_flag & HBA_FCOE_MODE) {
+		    test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 					"8347 Incapable PCI reset device: "
 					"0x%04x\n", ptr->device);
@@ -5136,6 +5141,12 @@ lpfc_info(struct Scsi_Host *host)
 			goto buffer_done;
 	}
 
+	/* Support for BSG ioctls */
+	scnprintf(tmp, sizeof(tmp), " BSG");
+	if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+	    sizeof(lpfcinfobuf))
+		goto buffer_done;
+
 	/* PCI resettable */
 	if (!lpfc_check_pci_resettable(phba)) {
 		scnprintf(tmp, sizeof(tmp), " PCI resettable");
@@ -5184,7 +5195,7 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
 **/
 void lpfc_poll_timeout(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
+	struct lpfc_hba *phba = timer_container_of(phba, t, fcp_poll_timer);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_handle_fast_ring_event(phba,
@@ -5346,20 +5357,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 					 cmnd->cmnd[0],
 					 scsi_prot_ref_tag(cmnd),
 					 scsi_logical_block_count(cmnd),
-					 (cmnd->cmnd[1]>>5));
+					 scsi_get_prot_type(cmnd));
 		}
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 	} else {
-		if (vport->phba->cfg_enable_bg) {
-			lpfc_printf_vlog(vport,
-					 KERN_INFO, LOG_SCSI_CMD,
-					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
-					 "x%x reftag x%x cnt %u pt %x\n",
-					 cmnd->cmnd[0],
-					 scsi_prot_ref_tag(cmnd),
-					 scsi_logical_block_count(cmnd),
-					 (cmnd->cmnd[1]>>5));
-		}
 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 	}
 
@@ -5492,7 +5493,7 @@ void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
 	struct lpfc_vmid *cur;
 
 	if (vport->port_type == LPFC_PHYSICAL_PORT)
-		del_timer_sync(&vport->phba->inactive_vmid_poll);
+		timer_delete_sync(&vport->phba->inactive_vmid_poll);
 
 	kfree(vport->qfpa_res);
 	kfree(vport->vmid_priority.vmid_range);
@@ -5547,7 +5548,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	spin_lock(&phba->hbalock);
 	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_IOQ_FLUSH) {
+	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			"3168 SCSI Layer abort requested I/O has been "
 			"flushed by LLD.\n");
@@ -5565,11 +5566,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	iocb = &lpfc_cmd->cur_iocbq;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
-		if (!pring_s4) {
+		/* if the io_wq & pring are gone, the port was reset. */
+		if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq ||
+		    !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+					 "2877 SCSI Layer I/O Abort Request "
+					 "IO CMPL Status x%x ID %d LUN %llu "
+					 "HBA_SETUP %d\n", FAILED,
+					 cmnd->device->id,
+					 (u64)cmnd->device->lun,
+					 test_bit(HBA_SETUP, &phba->hba_flag));
 			ret = FAILED;
 			goto out_unlock_hba;
 		}
+		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
 		spin_lock(&pring_s4->ring_lock);
 	}
 	/* the command is in process of being cancelled */
@@ -5640,9 +5650,8 @@ wait_for_cmpl:
 	 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
 	 * for abort to complete.
 	 */
-	wait_event_timeout(waitq,
-			   (lpfc_cmd->pCmd != cmnd),
-			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+	wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd),
+			   secs_to_jiffies(2*vport->cfg_devloss_tmo));
 
 	spin_lock(&lpfc_cmd->buf_lock);
 
@@ -5830,7 +5839,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
 
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0702 Issue %s to TGT %d LUN %llu "
-			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
+			 "rpi x%x nlp_flag x%lx Data: x%x x%x\n",
 			 lpfc_taskmgmt_name(task_mgmt_cmd),
 			 tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag,
 			 iocbq->sli4_xritag, iocbq->cmd_flag);
@@ -5906,7 +5915,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
 	 * If target is not in a MAPPED state, delay until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
 	while (time_after(later, jiffies)) {
 		if (!pnode)
 			return FAILED;
@@ -5926,7 +5935,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
 /**
  * lpfc_reset_flush_io_context -
  * @vport: The virtual port (scsi_host) for the flush context
- * @tgt_id: If aborting by Target contect - specifies the target id
+ * @tgt_id: If aborting by Target context - specifies the target id
  * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
@@ -5952,7 +5961,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
 		lpfc_sli_abort_taskmgmt(vport,
 					&phba->sli.sli3_ring[LPFC_FCP_RING],
 					tgt_id, lun_id, context);
-	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
 	while (time_after(later, jiffies) && cnt) {
 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
@@ -6095,13 +6104,19 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "0722 Target Reset rport failure: rdata x%px\n",
 				 rdata);
 		if (pnode) {
+			clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag);
 			spin_lock_irqsave(&pnode->lock, flags);
-			pnode->nlp_flag &= ~NLP_NPR_ADISC;
 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
 			spin_unlock_irqrestore(&pnode->lock, flags);
 		}
-		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
-					    LPFC_CTX_TGT);
+		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+						     LPFC_CTX_TGT);
+		if (status != SUCCESS) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+					 "0726 Target Reset flush status x%x\n",
+					 status);
+			return status;
+		}
 		return FAST_IO_FAIL;
 	}
@@ -6121,31 +6136,27 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 
 		/* Issue LOGO, if no LOGO is outstanding */
 		spin_lock_irqsave(&pnode->lock, flags);
-		if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
+		if (!test_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags) &&
 		    !pnode->logo_waitq) {
 			pnode->logo_waitq = &waitq;
 			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-			pnode->nlp_flag |= NLP_ISSUE_LOGO;
-			pnode->save_flags |= NLP_WAIT_FOR_LOGO;
 			spin_unlock_irqrestore(&pnode->lock, flags);
+			set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
+			set_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags);
 			lpfc_unreg_rpi(vport, pnode);
 			wait_event_timeout(waitq,
-					   (!(pnode->save_flags &
-					      NLP_WAIT_FOR_LOGO)),
-					   msecs_to_jiffies(dev_loss_tmo *
-							    1000));
+					   !test_bit(NLP_WAIT_FOR_LOGO,
+						     &pnode->save_flags),
+					   secs_to_jiffies(dev_loss_tmo));
 
-			if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
+			if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
+					       &pnode->save_flags))
 				lpfc_printf_vlog(vport, KERN_ERR, logit,
 						 "0725 SCSI layer TGTRST "
 						 "failed & LOGO TMO (%d, %llu) "
 						 "return x%x\n",
 						 tgt_id, lun_id, status);
-				spin_lock_irqsave(&pnode->lock, flags);
-				pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
-			} else {
-				spin_lock_irqsave(&pnode->lock, flags);
-			}
+			spin_lock_irqsave(&pnode->lock, flags);
 			pnode->logo_waitq = NULL;
 			spin_unlock_irqrestore(&pnode->lock, flags);
 			status = SUCCESS;
@@ -6197,7 +6208,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
 	int rc, ret = SUCCESS;
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			 "3172 SCSI layer issued Host Reset Data:\n");
+			 "3172 SCSI layer issued Host Reset\n");
 
 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 	lpfc_offline(phba);
@@ -6227,7 +6238,7 @@ error:
 }
 
 /**
- * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * lpfc_sdev_init - scsi_host_template sdev_init entry point
  * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
@@ -6240,7 +6251,7 @@ error:
 * 0 - Success
 **/
 static int
-lpfc_slave_alloc(struct scsi_device *sdev)
+lpfc_sdev_init(struct scsi_device *sdev)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
 	struct lpfc_hba *phba = vport->phba;
@@ -6343,8 +6354,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 }
 
 /**
- * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * lpfc_sdev_configure - scsi_host_template sdev_configure entry point
  * @sdev: Pointer to scsi_device.
+ * @lim: Request queue limits.
 *
 * This routine configures following items
 * - Tag command queuing support for @sdev if supported.
@@ -6354,7 +6366,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 * 0 - Success
 **/
 static int
-lpfc_slave_configure(struct scsi_device *sdev)
+lpfc_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
 	struct lpfc_hba *phba = vport->phba;
@@ -6372,13 +6384,13 @@ lpfc_slave_configure(struct scsi_device *sdev)
 }
 
 /**
- * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * lpfc_sdev_destroy - sdev_destroy entry point of SHT data structure
  * @sdev: Pointer to scsi_device.
 *
 * This routine sets @sdev hostatdata filed to null.
 **/
 static void
-lpfc_slave_destroy(struct scsi_device *sdev)
+lpfc_sdev_destroy(struct scsi_device *sdev)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
 	struct lpfc_hba *phba = vport->phba;
@@ -6423,7 +6435,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 {
 	struct lpfc_device_data *lun_info;
-	int memory_flags;
+	gfp_t memory_flags;
 
 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
 	    !(phba->cfg_fof))
@@ -6738,7 +6750,13 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 }
 
 static int
-lpfc_no_slave(struct scsi_device *sdev)
+lpfc_init_no_sdev(struct scsi_device *sdev)
+{
+	return -ENODEV;
+}
+
+static int
+lpfc_config_no_sdev(struct scsi_device *sdev, struct queue_limits *lim)
 {
 	return -ENODEV;
 }
@@ -6749,8 +6767,8 @@ struct scsi_host_template lpfc_template_nvme = {
 	.proc_name = LPFC_DRIVER_NAME,
 	.info = lpfc_info,
 	.queuecommand = lpfc_no_command,
-	.slave_alloc = lpfc_no_slave,
-	.slave_configure = lpfc_no_slave,
+	.sdev_init = lpfc_init_no_sdev,
+	.sdev_configure = lpfc_config_no_sdev,
 	.scan_finished = lpfc_scan_finished,
 	.this_id = -1,
 	.sg_tablesize = 1,
@@ -6773,9 +6791,9 @@ struct scsi_host_template lpfc_template = {
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
 	.eh_host_reset_handler = lpfc_host_reset_handler,
-	.slave_alloc = lpfc_slave_alloc,
-	.slave_configure = lpfc_slave_configure,
-	.slave_destroy = lpfc_slave_destroy,
+	.sdev_init = lpfc_sdev_init,
+	.sdev_configure = lpfc_sdev_configure,
+	.sdev_destroy = lpfc_sdev_destroy,
 	.scan_finished = lpfc_scan_finished,
 	.this_id = -1,
 	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
@@ -6800,9 +6818,9 @@ struct scsi_host_template lpfc_vport_template = {
 	.eh_target_reset_handler = lpfc_target_reset_handler,
 	.eh_bus_reset_handler = NULL,
 	.eh_host_reset_handler = NULL,
-	.slave_alloc = lpfc_slave_alloc,
-	.slave_configure = lpfc_slave_configure,
-	.slave_destroy = lpfc_slave_destroy,
+	.sdev_init = lpfc_sdev_init,
+	.sdev_configure = lpfc_sdev_configure,
+	.sdev_destroy = lpfc_sdev_destroy,
	.scan_finished = lpfc_scan_finished,
 	.this_id = -1,
 	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
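For readers skimming the diff, the lpfc_ramp_down_queue_handler() hunk replaces the old proportional scaling (queue_depth * num_rsrc_err / (num_rsrc_err + num_cmd_success)) with a simple subtraction clamped at 1. Below is a minimal user-space sketch of that new arithmetic only; it is not driver code, and the helper name and sample values are made up for illustration.

#include <stdio.h>

/* Sketch of the ramp-down rule introduced by the hunk above:
 * subtract the resource-error count from the current depth,
 * never going below a depth of 1.
 */
static unsigned long ramp_down_depth(unsigned long queue_depth,
				     unsigned long num_rsrc_err)
{
	if (num_rsrc_err >= queue_depth)
		return 1;
	return queue_depth - num_rsrc_err;
}

int main(void)
{
	/* depth 32 with 5 resource errors ramps down to 27 */
	printf("%lu\n", ramp_down_depth(32, 5));
	/* more errors than the current depth clamps to 1 */
	printf("%lu\n", ramp_down_depth(32, 64));
	return 0;
}

The design change is that the new rule no longer depends on num_cmd_success at all, which is why the diff also drops that counter and its atomic_set() reset.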
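The fcp_cmnd32 hunks all follow one pattern: when scsi_cmnd->cmd_len exceeds LPFC_FCP_CDB_LEN (16 bytes), the command payload uses the 32-byte-CDB layout, so fcpDl must be written through that type. The sketch below only illustrates the branching idea with hypothetical, trimmed-down structures; the real struct fcp_cmnd/fcp_cmnd32 live in the lpfc headers and differ in layout, and htonl() here merely stands in for cpu_to_be32().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() used as a stand-in for cpu_to_be32() */

#define LPFC_FCP_CDB_LEN	16
#define LPFC_FCP_CDB_LEN_32	32

/* Hypothetical, simplified payload layouts for illustration only */
struct demo_fcp_cmnd {			/* classic 16-byte CDB form */
	uint8_t  fcpCdb[LPFC_FCP_CDB_LEN];
	uint32_t fcpDl;			/* big-endian data length */
};

struct demo_fcp_cmnd32 {		/* extended 32-byte CDB form */
	uint8_t  fcpCdb[LPFC_FCP_CDB_LEN_32];
	uint32_t fcpDl;
};

/* Mirrors the diff's branching: a CDB longer than 16 bytes means the
 * payload is laid out as the 32-byte-CDB structure, so fcpDl sits at a
 * different offset and must be set through that type.
 */
static void demo_set_fcp_dl(void *payload, size_t cmd_len, uint32_t bufflen)
{
	if (cmd_len > LPFC_FCP_CDB_LEN)
		((struct demo_fcp_cmnd32 *)payload)->fcpDl = htonl(bufflen);
	else
		((struct demo_fcp_cmnd *)payload)->fcpDl = htonl(bufflen);
}

int main(void)
{
	struct demo_fcp_cmnd32 cmd;

	memset(&cmd, 0, sizeof(cmd));
	demo_set_fcp_dl(&cmd, 32, 4096);	/* 32-byte CDB path */
	printf("fcpDl (be32): 0x%08" PRIx32 "\n", cmd.fcpDl);
	return 0;
}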
