Diffstat (limited to 'drivers/scsi/qla2xxx/qla_mbx.c')
| -rw-r--r-- | drivers/scsi/qla2xxx/qla_mbx.c | 173 |
1 file changed, 144 insertions, 29 deletions
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 10d2655ef676..1f01576f044b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -9,6 +9,12 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
+#ifdef CONFIG_PPC
+#define IS_PPCARCH true
+#else
+#define IS_PPCARCH false
+#endif
+
 static struct mb_cmd_name {
 	uint16_t cmd;
 	const char *str;
@@ -188,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
 	    ha->flags.eeh_busy) {
 		ql_log(ql_log_warn, vha, 0xd035,
-		    "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+		    "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
 		    ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
 		rval = QLA_ABORTED;
 		goto premature_exit;
@@ -232,6 +238,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			ql_dbg(ql_dbg_mbx, vha, 0x1112,
 			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
 			wrt_reg_word(optr, *iptr);
+		} else {
+			wrt_reg_word(optr, 0);
 		}
 
 		mboxes >>= 1;
@@ -245,6 +253,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	/* Issue set host interrupt command to send cmd out. */
 	ha->flags.mbox_int = 0;
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+	reinit_completion(&ha->mbx_intr_comp);
 
 	/* Unlock mbx registers and wait for interrupt */
 	ql_dbg(ql_dbg_mbx, vha, 0x100f,
@@ -265,9 +274,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 		wait_time = jiffies;
-		atomic_inc(&ha->num_pend_mbx_stage3);
 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
 		    mcp->tov * HZ)) {
+			ql_dbg(ql_dbg_mbx, vha, 0x117a,
+			    "cmd=%x Timeout.\n", command);
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+			reinit_completion(&ha->mbx_intr_comp);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 			if (chip_reset != ha->chip_reset) {
 				eeh_delay = ha->flags.eeh_busy ? 1 : 0;
 
@@ -276,16 +291,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 				spin_unlock_irqrestore(&ha->hardware_lock,
 				    flags);
 				atomic_dec(&ha->num_pend_mbx_stage2);
-				atomic_dec(&ha->num_pend_mbx_stage3);
 				rval = QLA_ABORTED;
 				goto premature_exit;
 			}
-			ql_dbg(ql_dbg_mbx, vha, 0x117a,
-			    "cmd=%x Timeout.\n", command);
-			spin_lock_irqsave(&ha->hardware_lock, flags);
-			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
-			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
 		} else if (ha->flags.purge_mbox ||
 		    chip_reset != ha->chip_reset) {
 			eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -294,11 +302,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 			ha->flags.mbox_busy = 0;
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
 			atomic_dec(&ha->num_pend_mbx_stage2);
-			atomic_dec(&ha->num_pend_mbx_stage3);
 			rval = QLA_ABORTED;
 			goto premature_exit;
 		}
-		atomic_dec(&ha->num_pend_mbx_stage3);
 
 		if (time_after(jiffies, wait_time + 5 * HZ))
 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
@@ -689,7 +695,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 	mbx_cmd_t *mcp = &mc;
 	u8 semaphore = 0;
 #define EXE_FW_FORCE_SEMAPHORE BIT_7
-	u8 retry = 3;
+	u8 retry = 5;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
 	    "Entered %s.\n", __func__);
@@ -728,6 +734,9 @@ again:
 				vha->min_supported_speed =
 				    nv->min_supported_speed;
 			}
+
+			if (IS_PPCARCH)
+				mcp->mb[11] |= BIT_4;
 		}
 
 		if (ha->flags.exlogins_enabled)
@@ -764,6 +773,12 @@ again:
 			goto again;
 		}
 
+		if (retry) {
+			retry--;
+			ql_dbg(ql_dbg_async, vha, 0x509d,
+			    "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
+			goto again;
+		}
 		ql_dbg(ql_dbg_mbx, vha, 0x1026,
 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
 		vha->hw_err_cnt++;
@@ -2134,7 +2149,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
 
 	pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
 	    sizeof(*pdb), DMA_FROM_DEVICE);
-	if (!pdb_dma) {
+	if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
 		ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
 		return QLA_MEMORY_ALLOC_FAILED;
 	}
@@ -2196,6 +2211,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
 	    "Entered %s.\n", __func__);
 
+	if (!ha->flags.fw_started)
+		return QLA_FUNCTION_FAILED;
+
 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 	mcp->out_mb = MBX_0;
 	if (IS_FWI2_CAPABLE(vha->hw))
@@ -3029,8 +3047,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
 		ha->orig_fw_iocb_count = mcp->mb[10];
 		if (ha->flags.npiv_supported)
 			ha->max_npiv_vports = mcp->mb[11];
-		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
-		    IS_QLA28XX(ha))
+		if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
 			ha->fw_max_fcf_count = mcp->mb[12];
 	}
 
@@ -3052,7 +3069,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
  *	Kernel context.
  */
 int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+    u8 *num_entries)
 {
 	int rval;
 	mbx_cmd_t mc;
@@ -3092,6 +3110,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
 
 		if (pos_map)
 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+		if (num_entries)
+			*num_entries = pmap[0];
 	}
 
 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -3991,7 +4011,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]);
 		ha->current_topology = ISP_CFG_NL;
-		qlt_update_host_map(vha, id);
+		qla_update_host_map(vha, id);
 
 	} else if (rptid_entry->format == 1) {
 		/* fabric */
@@ -4107,7 +4127,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 				       WWN_SIZE);
 			}
 
-			qlt_update_host_map(vha, id);
+			qla_update_host_map(vha, id);
 		}
 
 		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -4134,7 +4154,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		if (!found)
 			return;
 
-		qlt_update_host_map(vp, id);
+		qla_update_host_map(vp, id);
 
 		/*
 		 * Cannot configure here as we are still sitting on the
@@ -4165,7 +4185,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 
 		ha->flags.n2n_ae = 1;
 		spin_lock_irqsave(&ha->vport_slock, flags);
-		qlt_update_vp_map(vha, SET_AL_PA);
+		qla_update_vp_map(vha, SET_AL_PA);
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -5621,7 +5641,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
 	mcp->out_mb = MBX_1|MBX_0;
 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
-		mcp->in_mb |= MBX_3;
+		mcp->in_mb |= MBX_4|MBX_3;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -6457,6 +6477,54 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
 	return rval;
 }
 
+int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
+    struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
+{
+	int rval;
+	dma_addr_t dd_dma;
+	uint size = sizeof(dd->buf);
+	uint16_t options = dd->options;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+	    "Entered %s.\n", __func__);
+
+	dd_dma = dma_map_single(&vha->hw->pdev->dev,
+	    dd->buf, size, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+		ql_log(ql_log_warn, vha, 0x1194,
+		    "Failed to map dma buffer.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	memset(dd->buf, 0, size);
+
+	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+	mcp->mb[1] = options;
+	mcp->mb[2] = MSW(LSD(dd_dma));
+	mcp->mb[3] = LSW(LSD(dd_dma));
+	mcp->mb[6] = MSW(MSD(dd_dma));
+	mcp->mb[7] = LSW(MSD(dd_dma));
+	mcp->mb[8] = size;
+	mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
+	mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
+	mcp->buf_size = size;
+	mcp->flags = MBX_DMA_IN;
+	mcp->tov = MBX_TOV_SECONDS * 4;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
+
+	return rval;
+}
+
 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
 {
 	sp->u.iocb_cmd.u.mbx.rc = res;
@@ -6479,23 +6547,21 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
 	if (!vha->hw->flags.fw_started)
 		goto done;
 
+	/* ref: INIT */
 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	sp->type = SRB_MB_IOCB;
-	sp->name = mb_to_str(mcp->mb[0]);
-
 	c = &sp->u.iocb_cmd;
-	c->timeout = qla2x00_async_iocb_timeout;
 	init_completion(&c->u.mbx.comp);
 
-	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+	sp->type = SRB_MB_IOCB;
+	sp->name = mb_to_str(mcp->mb[0]);
+	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+	    qla2x00_async_mb_sp_done);
 
 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
 
-	sp->done = qla2x00_async_mb_sp_done;
-
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
@@ -6527,7 +6593,56 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
 	}
 
 done_free_sp:
-	sp->free(sp);
+	/* ref: INIT */
+	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+done:
+	return rval;
+}
+
+int qla24xx_print_fc_port_id(struct scsi_qla_host *vha, struct seq_file *s, u16 loop_id)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	dma_addr_t pd_dma;
+	struct port_database_24xx *pd;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+	if (pd == NULL) {
+		ql_log(ql_log_warn, vha, 0xd047,
+		    "Failed to allocate port database structure.\n");
+		goto done;
+	}
+
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_PORT_DATABASE;
+	mc.mb[1] = loop_id;
+	mc.mb[2] = MSW(pd_dma);
+	mc.mb[3] = LSW(pd_dma);
+	mc.mb[6] = MSW(MSD(pd_dma));
+	mc.mb[7] = LSW(MSD(pd_dma));
+	mc.mb[9] = vha->vp_idx;
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1193, "%s: fail\n", __func__);
+		goto done_free_sp;
+	}
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
+	    __func__, pd->port_name);
+
+	seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
+	    pd->port_name, pd->port_id[0],
+	    pd->port_id[1], pd->port_id[2],
+	    loop_id);
+
+done_free_sp:
+	if (pd)
+		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
 done:
 	return rval;
 }