Diffstat (limited to 'drivers/scsi/qla2xxx/qla_init.c')
 drivers/scsi/qla2xxx/qla_init.c | 740 +++++++++++++++++++++++++++++----------
 1 file changed, 534 insertions(+), 206 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8d9ecabb1aac..d395cbfe6802 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -45,7 +45,7 @@ static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
 void qla2x00_sp_timeout(struct timer_list *t)
 {
-	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
+	srb_t *sp = timer_container_of(sp, t, u.iocb_cmd.timer);
 	struct srb_iocb *iocb;
 	scsi_qla_host_t *vha = sp->vha;
 
@@ -67,7 +67,7 @@ void qla2x00_sp_free(srb_t *sp)
 {
 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
 
-	del_timer(&iocb->timer);
+	timer_delete(&iocb->timer);
 	qla2x00_rel_sp(sp);
 }
 
@@ -128,12 +128,14 @@ static void qla24xx_abort_iocb_timeout(void *data)
 		    sp->cmd_sp)) {
 			qpair->req->outstanding_cmds[handle] = NULL;
 			cmdsp_found = 1;
+			qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
 		}
 
 		/* removing the abort */
 		if (qpair->req->outstanding_cmds[handle] == sp) {
 			qpair->req->outstanding_cmds[handle] = NULL;
 			sp_found = 1;
+			qla_put_fw_resources(qpair, &sp->iores);
 			break;
 		}
 	}
@@ -388,6 +390,12 @@ done_free_sp:
 	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
 	fcport->flags &= ~FCF_ASYNC_ACTIVE;
+
+	/*
+	 * async login failed. Could be due to iocb/exchange resource
+	 * being low. Set state DELETED for re-login process to start again.
+	 */
+	qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
 	return rval;
 }
 
@@ -415,7 +423,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 	sp->type = SRB_LOGOUT_CMD;
 	sp->name = "logout";
 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
-			      qla2x00_async_logout_sp_done),
+			      qla2x00_async_logout_sp_done);
 
 	ql_dbg(ql_dbg_disc, vha, 0x2070,
 	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
@@ -500,6 +508,7 @@ static void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
 {
 	struct fc_port *fcport = ea->fcport;
+	unsigned long flags;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20d2,
 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
@@ -514,9 +523,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
 		ql_dbg(ql_dbg_disc, vha, 0x2066,
 		    "%s %8phC: adisc fail: post delete\n",
 		    __func__, ea->fcport->port_name);
+
+		spin_lock_irqsave(&vha->work_lock, flags);
 		/* deleted = 0 & logout_on_delete = force fw cleanup */
-		fcport->deleted = 0;
+		if (fcport->deleted == QLA_SESS_DELETED)
+			fcport->deleted = 0;
+
 		fcport->logout_on_delete = 1;
+		spin_unlock_irqrestore(&vha->work_lock, flags);
+
 		qlt_schedule_sess_for_deletion(ea->fcport);
 		return;
 	}
@@ -1126,7 +1141,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 	u16 *mb;
 
 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
-		return rval;
+		goto done;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
@@ -1178,10 +1193,15 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 	return rval;
 
 done_free_sp:
-	/* ref: INIT */
-	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+	/*
+	 * use qla24xx_async_gnl_sp_done to purge all pending gnl request.
+	 * kref_put is call behind the scene.
+	 */
+	sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
+	qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
+	fcport->flags &= ~(FCF_ASYNC_SENT);
 done:
-	fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
+	fcport->flags &= ~(FCF_ASYNC_ACTIVE);
 	return rval;
 }
 
@@ -1438,7 +1458,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 
 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	ea->fcport->login_gen++;
-	ea->fcport->deleted = 0;
 	ea->fcport->logout_on_delete = 1;
 
 	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
@@ -1646,7 +1665,6 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	u16 data[2];
-	u64 wwn;
 	u16 sec;
 
 	ql_dbg(ql_dbg_disc, vha, 0x20d8,
@@ -1686,7 +1704,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 	switch (fcport->disc_state) {
 	case DSC_DELETED:
-		wwn = wwn_to_u64(fcport->node_name);
 		switch (vha->hw->current_topology) {
 		case ISP_CFG_N:
 			if (fcport_is_smaller(fcport)) {
@@ -1710,12 +1727,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 			}
 			break;
 		default:
-			if (wwn == 0) {
-				ql_dbg(ql_dbg_disc, vha, 0xffff,
-				    "%s %d %8phC post GNNID\n",
-				    __func__, __LINE__, fcport->port_name);
-				qla24xx_post_gnnid_work(vha, fcport);
-			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
+			if (fcport->loop_id == FC_NO_LOOP_ID) {
 				ql_dbg(ql_dbg_disc, vha, 0x20bd,
 				    "%s %d %8phC post gnl\n",
 				    __func__, __LINE__, fcport->port_name);
@@ -1830,16 +1842,25 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
 	return qla2x00_post_work(vha, e);
 }
 
+static void qla_rscn_gen_tick(scsi_qla_host_t *vha, u32 *ret_rscn_gen)
+{
+	*ret_rscn_gen = atomic_inc_return(&vha->rscn_gen);
+	/* memory barrier */
+	wmb();
+}
+
 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 {
 	fc_port_t *fcport;
 	unsigned long flags;
+	u32 rscn_gen;
 
 	switch (ea->id.b.rsvd_1) {
 	case RSCN_PORT_ADDR:
 		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
 		if (fcport) {
-			if (fcport->flags & FCF_FCP2_DEVICE &&
+			if (ql2xfc2target &&
+			    fcport->flags & FCF_FCP2_DEVICE &&
 			    atomic_read(&fcport->state) == FCS_ONLINE) {
 				ql_dbg(ql_dbg_disc, vha, 0x2115,
 				    "Delaying session delete for FCP2 portid=%06x %8phC ",
@@ -1862,15 +1883,16 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 					 * Otherwise we're already in the middle of a relogin
 					 */
 					fcport->scan_needed = 1;
-					fcport->rscn_gen++;
+					qla_rscn_gen_tick(vha, &fcport->rscn_gen);
 				}
 			} else {
 				fcport->scan_needed = 1;
-				fcport->rscn_gen++;
+				qla_rscn_gen_tick(vha, &fcport->rscn_gen);
 			}
 		}
 		break;
 	case RSCN_AREA_ADDR:
+		qla_rscn_gen_tick(vha, &rscn_gen);
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (fcport->flags & FCF_FCP2_DEVICE &&
 			    atomic_read(&fcport->state) == FCS_ONLINE)
@@ -1878,11 +1900,12 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 			if ((ea->id.b24 & 0xffff00) ==
 			    (fcport->d_id.b24 & 0xffff00)) {
 				fcport->scan_needed = 1;
-				fcport->rscn_gen++;
+				fcport->rscn_gen = rscn_gen;
 			}
 		}
 		break;
 	case RSCN_DOM_ADDR:
+		qla_rscn_gen_tick(vha, &rscn_gen);
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (fcport->flags & FCF_FCP2_DEVICE &&
 			    atomic_read(&fcport->state) == FCS_ONLINE)
@@ -1890,19 +1913,20 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 			if ((ea->id.b24 & 0xff0000) ==
 			    (fcport->d_id.b24 & 0xff0000)) {
 				fcport->scan_needed = 1;
-				fcport->rscn_gen++;
+				fcport->rscn_gen = rscn_gen;
 			}
 		}
 		break;
 	case RSCN_FAB_ADDR:
 	default:
+		qla_rscn_gen_tick(vha, &rscn_gen);
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (fcport->flags & FCF_FCP2_DEVICE &&
 			    atomic_read(&fcport->state) == FCS_ONLINE)
 				continue;
 
 			fcport->scan_needed = 1;
-			fcport->rscn_gen++;
+			fcport->rscn_gen = rscn_gen;
 		}
 		break;
 	}
@@ -1911,6 +1935,7 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
 	if (vha->scan.scan_flags == 0) {
 		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
 		vha->scan.scan_flags |= SF_QUEUED;
+		vha->scan.rscn_gen_start = atomic_read(&vha->rscn_gen);
 		schedule_delayed_work(&vha->scan.scan_work, 5);
 	}
 	spin_unlock_irqrestore(&vha->work_lock, flags);
@@ -1994,12 +2019,17 @@ qla2x00_tmf_iocb_timeout(void *data)
 	int rc, h;
 	unsigned long flags;
 
-	rc = qla24xx_async_abort_cmd(sp, false);
+	if (sp->type == SRB_MARKER)
+		rc = QLA_FUNCTION_FAILED;
+	else
+		rc = qla24xx_async_abort_cmd(sp, false);
+
 	if (rc) {
 		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
 			if (sp->qpair->req->outstanding_cmds[h] == sp) {
 				sp->qpair->req->outstanding_cmds[h] = NULL;
+				qla_put_fw_resources(sp->qpair, &sp->iores);
 				break;
 			}
 		}
@@ -2010,24 +2040,154 @@ qla2x00_tmf_iocb_timeout(void *data)
 	}
 }
 
+static void qla_marker_sp_done(srb_t *sp, int res)
+{
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	if (res != QLA_SUCCESS)
+		ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
+		    "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
+		    sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
+		    sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
+
+	sp->u.iocb_cmd.u.tmf.data = res;
+	complete(&tmf->u.tmf.comp);
+}
+
+#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
+{\
+	int cnt = 5; \
+	do { \
+		if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
+			_rval = -EINVAL; \
+			break; \
+		} \
+		_rval = qla2x00_start_sp(_sp); \
+		if (_rval == -EAGAIN) \
+			msleep(1); \
+		else \
+			break; \
+		cnt--; \
+	} while (cnt); \
+}
+
+/**
+ * qla26xx_marker: send marker IOCB and wait for the completion of it.
+ * @arg: pointer to argument list.
+ *    It is assume caller will provide an fcport pointer and modifier
+ */
+static int
+qla26xx_marker(struct tmf_arg *arg)
+{
+	struct scsi_qla_host *vha = arg->vha;
+	struct srb_iocb *tm_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+	fc_port_t *fcport = arg->fcport;
+	u32 chip_gen, login_gen;
+
+	if (TMF_NOT_READY(arg->fcport)) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8039,
+		    "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+		    fcport->loop_id, fcport->d_id.b24,
+		    arg->modifier, arg->lun, arg->qpair->id);
+		return QLA_SUSPENDED;
+	}
+
+	chip_gen = vha->hw->chip_reset;
+	login_gen = fcport->login_gen;
+
+	/* ref: INIT */
+	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_MARKER;
+	sp->name = "marker";
+	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
+	sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
+
+	tm_iocb = &sp->u.iocb_cmd;
+	init_completion(&tm_iocb->u.tmf.comp);
+	tm_iocb->u.tmf.modifier = arg->modifier;
+	tm_iocb->u.tmf.lun = arg->lun;
+	tm_iocb->u.tmf.loop_id = fcport->loop_id;
+	tm_iocb->u.tmf.vp_index = vha->vp_idx;
+
+	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
+
+	ql_dbg(ql_dbg_taskm, vha, 0x8006,
+	    "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b24,
+	    arg->modifier, arg->lun, sp->qpair->id, rval);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8031,
+		    "Marker IOCB send failure (%x).\n", rval);
+		goto done_free_sp;
+	}
+
+	wait_for_completion(&tm_iocb->u.tmf.comp);
+	rval = tm_iocb->u.tmf.data;
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8019,
+		    "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+		    sp->handle, fcport->loop_id, fcport->d_id.b24,
+		    arg->modifier, arg->lun, sp->qpair->id, rval);
+	}
+
+done_free_sp:
+	/* ref: INIT */
+	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+done:
+	return rval;
+}
+
 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
 {
 	struct srb_iocb *tmf = &sp->u.iocb_cmd;
 
+	if (res)
+		tmf->u.tmf.data = res;
 	complete(&tmf->u.tmf.comp);
 }
 
-int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
-    uint32_t tag)
+static int qla_tmf_wait(struct tmf_arg *arg)
 {
-	struct scsi_qla_host *vha = fcport->vha;
+	/* there are only 2 types of error handling that reaches here, lun or target reset */
+	if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
+		return qla2x00_eh_wait_for_pending_commands(arg->vha,
+		    arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
+	else
+		return qla2x00_eh_wait_for_pending_commands(arg->vha,
+		    arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
+}
+
+static int
+__qla2x00_async_tm_cmd(struct tmf_arg *arg)
+{
+	struct scsi_qla_host *vha = arg->vha;
 	struct srb_iocb *tm_iocb;
 	srb_t *sp;
 	int rval = QLA_FUNCTION_FAILED;
+	fc_port_t *fcport = arg->fcport;
+	u32 chip_gen, login_gen;
+	u64 jif;
+
+	if (TMF_NOT_READY(arg->fcport)) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8032,
+		    "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+		    fcport->loop_id, fcport->d_id.b24,
+		    arg->modifier, arg->lun, arg->qpair->id);
+		return QLA_SUSPENDED;
+	}
+
+	chip_gen = vha->hw->chip_reset;
+	login_gen = fcport->login_gen;
 
 	/* ref: INIT */
-	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
@@ -2040,15 +2200,16 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	tm_iocb = &sp->u.iocb_cmd;
 	init_completion(&tm_iocb->u.tmf.comp);
-	tm_iocb->u.tmf.flags = flags;
-	tm_iocb->u.tmf.lun = lun;
+	tm_iocb->u.tmf.flags = arg->flags;
+	tm_iocb->u.tmf.lun = arg->lun;
+
+	START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
 
 	ql_dbg(ql_dbg_taskm, vha, 0x802f,
-	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
-	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
-	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	    "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b24,
+	    arg->flags, arg->lun, sp->qpair->id, rval);
 
-	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
 	wait_for_completion(&tm_iocb->u.tmf.comp);
@@ -2061,23 +2222,130 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	}
 
 	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
-		flags = tm_iocb->u.tmf.flags;
-		lun = (uint16_t)tm_iocb->u.tmf.lun;
+		jif = jiffies;
+		if (qla_tmf_wait(arg)) {
+			ql_log(ql_log_info, vha, 0x803e,
+			    "Waited %u ms Nexus=%ld:%06x:%llu.\n",
+			    jiffies_to_msecs(jiffies - jif), vha->host_no,
+			    fcport->d_id.b24, arg->lun);
+		}
 
-		/* Issue Marker IOCB */
-		qla2x00_marker(vha, vha->hw->base_qpair,
-		    fcport->loop_id, lun,
-		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+		if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
+			rval = qla26xx_marker(arg);
+		} else {
+			ql_log(ql_log_info, vha, 0x803e,
+			    "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
+			    vha->host_no, fcport->d_id.b24, arg->lun);
+			rval = QLA_FUNCTION_FAILED;
+		}
 	}
 
+	if (tm_iocb->u.tmf.data)
+		rval = tm_iocb->u.tmf.data;
+
 done_free_sp:
 	/* ref: INIT */
 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
-	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
 	return rval;
 }
 
+static void qla_put_tmf(struct tmf_arg *arg)
+{
+	struct scsi_qla_host *vha = arg->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	ha->active_tmf--;
+	list_del(&arg->tmf_elem);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+static
+int qla_get_tmf(struct tmf_arg *arg)
+{
+	struct scsi_qla_host *vha = arg->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	fc_port_t *fcport = arg->fcport;
+	int rc = 0;
+	struct tmf_arg *t;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
+		if (t->fcport == arg->fcport && t->lun == arg->lun) {
+			/* reject duplicate TMF */
+			ql_log(ql_log_warn, vha, 0x802c,
+			    "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
+			    vha->host_no, fcport->d_id.b24, arg->lun);
+			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+			return -EINVAL;
+		}
+	}
+
+	list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
+	while (ha->active_tmf >= MAX_ACTIVE_TMF) {
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+		msleep(1);
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		if (TMF_NOT_READY(fcport)) {
+			ql_log(ql_log_warn, vha, 0x802c,
+			    "Unable to acquire TM resource due to disruption.\n");
+			rc = EIO;
+			break;
+		}
+		if (ha->active_tmf < MAX_ACTIVE_TMF &&
+		    list_is_first(&arg->tmf_elem, &ha->tmf_pending))
+			break;
+	}
+
+	list_del(&arg->tmf_elem);
+
+	if (!rc) {
+		ha->active_tmf++;
+		list_add_tail(&arg->tmf_elem, &ha->tmf_active);
+	}
+
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	return rc;
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+		     uint32_t tag)
+{
+	struct scsi_qla_host *vha = fcport->vha;
+	struct tmf_arg a;
+	int rval = QLA_SUCCESS;
+
+	if (TMF_NOT_READY(fcport))
+		return QLA_SUSPENDED;
+
+	a.vha = fcport->vha;
+	a.fcport = fcport;
+	a.lun = lun;
+	a.flags = flags;
+	INIT_LIST_HEAD(&a.tmf_elem);
+
+	if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
+		a.modifier = MK_SYNC_ID_LUN;
+	} else {
+		a.modifier = MK_SYNC_ID;
+	}
+
+	if (qla_get_tmf(&a))
+		return QLA_FUNCTION_FAILED;
+
+	a.qpair = vha->hw->base_qpair;
+	rval = __qla2x00_async_tm_cmd(&a);
+
+	qla_put_tmf(&a);
+	return rval;
+}
+
 int
 qla24xx_async_abort_command(srb_t *sp)
 {
@@ -2315,7 +2583,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
 			ea->fcport->login_pause = 1;
 
 			ql_dbg(ql_dbg_disc, vha, 0x20ed,
-			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n",
 			    __func__, __LINE__, ea->fcport->port_name,
 			    ea->fcport->d_id.b24, lid);
 		} else {
@@ -2413,6 +2681,40 @@ exit:
 	return rval;
 }
 
+void qla_enable_fce_trace(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->fce) {
+		ha->flags.fce_enabled = 1;
+		memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+		rval = qla2x00_enable_fce_trace(vha,
+		    ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
+
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0x8033,
+			    "Unable to reinitialize FCE (%d).\n", rval);
+			ha->flags.fce_enabled = 0;
+		}
+	}
+}
+
+static void qla_enable_eft_trace(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->eft) {
+		memset(ha->eft, 0, EFT_SIZE);
+		rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
+
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0x8034,
+			    "Unable to reinitialize EFT (%d).\n", rval);
+		}
+	}
+}
+
 /*
  * qla2x00_initialize_adapter
  *      Initialize board.
@@ -3415,26 +3717,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
 	return rval;
 }
 
-static void
-qla2x00_init_fce_trace(scsi_qla_host_t *vha)
+int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
 {
-	int rval;
 	dma_addr_t tc_dma;
 	void *tc;
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_FWI2_CAPABLE(ha))
-		return;
+		return -EINVAL;
 
 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
-		return;
+		return -EINVAL;
 
 	if (ha->fce) {
 		ql_dbg(ql_dbg_init, vha, 0x00bd,
 		    "%s: FCE Mem is already allocated.\n",
 		    __func__);
-		return;
+		return -EIO;
 	}
 
 	/* Allocate memory for Fibre Channel Event Buffer. */
@@ -3444,30 +3744,30 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha)
 		ql_log(ql_log_warn, vha, 0x00be,
 		    "Unable to allocate (%d KB) for FCE.\n",
 		    FCE_SIZE / 1024);
-		return;
-	}
-
-	rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
-	    ha->fce_mb, &ha->fce_bufs);
-	if (rval) {
-		ql_log(ql_log_warn, vha, 0x00bf,
-		    "Unable to initialize FCE (%d).\n", rval);
-		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
-		return;
+		return -ENOMEM;
 	}
 
 	ql_dbg(ql_dbg_init, vha, 0x00c0,
 	    "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
 
-	ha->flags.fce_enabled = 1;
 	ha->fce_dma = tc_dma;
 	ha->fce = tc;
+	ha->fce_bufs = FCE_NUM_BUFFERS;
+	return 0;
+}
+
+void qla2x00_free_fce_trace(struct qla_hw_data *ha)
+{
+	if (!ha->fce)
+		return;
+	dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma);
+	ha->fce = NULL;
+	ha->fce_dma = 0;
 }
 
 static void
-qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
 {
-	int rval;
 	dma_addr_t tc_dma;
 	void *tc;
 	struct qla_hw_data *ha = vha->hw;
@@ -3492,14 +3792,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
 		return;
 	}
 
-	rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
-	if (rval) {
-		ql_log(ql_log_warn, vha, 0x00c2,
-		    "Unable to initialize EFT (%d).\n", rval);
-		dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
-		return;
-	}
-
 	ql_dbg(ql_dbg_init, vha, 0x00c3,
 	    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
 
@@ -3507,13 +3799,6 @@
 	ha->eft_dma = tc_dma;
 	ha->eft = tc;
 }
 
-static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
-{
-	qla2x00_init_fce_trace(vha);
-	qla2x00_init_eft_trace(vha);
-}
-
 void
 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 {
@@ -3568,10 +3853,11 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 		if (ha->tgt.atio_ring)
 			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
 
-		qla2x00_init_fce_trace(vha);
-		if (ha->fce)
+		if (ha->fce) {
 			fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
-		qla2x00_init_eft_trace(vha);
+			ha->flags.fce_dump_buf_alloced = 1;
+		}
+		qla2x00_alloc_eft_trace(vha);
 		if (ha->eft)
 			eft_size = EFT_SIZE;
 	}
@@ -3930,29 +4216,61 @@ out:
 	return ha->flags.lr_detected;
 }
 
-void qla_init_iocb_limit(scsi_qla_host_t *vha)
+static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
 {
-	u16 i, num_qps;
-	u32 limit;
-	struct qla_hw_data *ha = vha->hw;
+	u8 num_qps;
+	u16 limit;
+	struct qla_hw_data *ha = qpair->vha->hw;
 
 	num_qps = ha->num_qpairs + 1;
 	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
 
-	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
-	ha->base_qpair->fwres.iocbs_limit = limit;
-	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+	qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+	qpair->fwres.iocbs_limit = limit;
+	qpair->fwres.iocbs_qp_limit = limit / num_qps;
+
+	qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+	qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+				   QLA_IOCB_PCT_LIMIT) / 100;
+}
+
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+	u8 i;
+	struct qla_hw_data *ha = vha->hw;
+
+	__qla_adjust_iocb_limit(ha->base_qpair);
 	ha->base_qpair->fwres.iocbs_used = 0;
+	ha->base_qpair->fwres.exch_used = 0;
+
 	for (i = 0; i < ha->max_qpairs; i++) {
 		if (ha->queue_pair_map[i]) {
-			ha->queue_pair_map[i]->fwres.iocbs_total =
-				ha->orig_fw_iocb_count;
-			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
-			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
-				limit / num_qps;
+			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
 			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+			ha->queue_pair_map[i]->fwres.exch_used = 0;
 		}
 	}
+
+	ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+	ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+	ha->fwres.exch_total = ha->orig_fw_xcb_count;
+	ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+	atomic_set(&ha->fwres.iocb_used, 0);
+	atomic_set(&ha->fwres.exch_used, 0);
+}
+
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
+{
+	u8 i;
+	struct qla_hw_data *ha = vha->hw;
+
+	__qla_adjust_iocb_limit(ha->base_qpair);
+
+	for (i = 0; i < ha->max_qpairs; i++) {
+		if (ha->queue_pair_map[i])
+			__qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+	}
 }
 
 /**
@@ -3969,7 +4287,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	unsigned long flags;
-	uint16_t fw_major_version;
 	int done_once = 0;
 
 	if (IS_P3P_TYPE(ha)) {
@@ -4036,7 +4353,6 @@ execute_fw_with_lr:
 		goto failed;
 
 enable_82xx_npiv:
-	fw_major_version = ha->fw_major_version;
 	if (IS_P3P_TYPE(ha))
 		qla82xx_check_md_needed(vha);
 	else
@@ -4053,6 +4369,7 @@ enable_82xx_npiv:
 				ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
 		}
+		qlt_config_nvram_with_fw_version(vha);
 		qla2x00_get_resource_cnts(vha);
 
 		qla_init_iocb_limit(vha);
@@ -4065,12 +4382,11 @@ enable_82xx_npiv:
 			if (rval != QLA_SUCCESS)
 				goto failed;
 
-			if (!fw_major_version && !(IS_P3P_TYPE(ha)))
-				qla2x00_alloc_offload_mem(vha);
-
 			if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
 				qla2x00_alloc_fw_dump(vha);
 
+			qla_enable_fce_trace(vha);
+			qla_enable_eft_trace(vha);
 		} else {
 			goto failed;
 		}
@@ -4550,15 +4866,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 	if (ha->flags.edif_enabled)
 		mid_init_cb->init_cb.frame_payload_size =
 			cpu_to_le16(ELS_MAX_PAYLOAD);
 
+	QLA_FW_STARTED(ha);
 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
 next_check:
 	if (rval) {
+		QLA_FW_STOPPED(ha);
 		ql_log(ql_log_fatal, vha, 0x00d2,
 		    "Init Firmware **** FAILED ****.\n");
 	} else {
 		ql_dbg(ql_dbg_init, vha, 0x00d3,
 		    "Init Firmware -- success.\n");
-		QLA_FW_STARTED(ha);
 		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
 	}
 
@@ -4809,9 +5126,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (vha->hw->flags.edif_enabled) {
 		if (topo != 2)
-			qlt_update_host_map(vha, id);
+			qla_update_host_map(vha, id);
 	} else if (!(topo == 2 && ha->flags.n2n_bigger))
-		qlt_update_host_map(vha, id);
+		qla_update_host_map(vha, id);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	if (!vha->flags.init_done)
@@ -4849,7 +5166,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 		if (use_tbl &&
 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
 		    index < QLA_MODEL_NAMES)
-			strlcpy(ha->model_desc,
+			strscpy(ha->model_desc,
 			    qla2x00_model_name[index * 2 + 1],
 			    sizeof(ha->model_desc));
 	} else {
@@ -4857,14 +5174,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 		if (use_tbl &&
 		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
 		    index < QLA_MODEL_NAMES) {
-			strlcpy(ha->model_number,
+			strscpy(ha->model_number,
 			    qla2x00_model_name[index * 2],
 			    sizeof(ha->model_number));
-			strlcpy(ha->model_desc,
+			strscpy(ha->model_desc,
 			    qla2x00_model_name[index * 2 + 1],
 			    sizeof(ha->model_desc));
 		} else {
-			strlcpy(ha->model_number, def,
+			strscpy(ha->model_number, def,
 			    sizeof(ha->model_number));
 		}
 	}
@@ -5206,27 +5523,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
 	return (rval);
 }
 
-static void
-qla2x00_rport_del(void *data)
-{
-	fc_port_t *fcport = data;
-	struct fc_rport *rport;
-	unsigned long flags;
-
-	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
-	rport = fcport->drport ? fcport->drport : fcport->rport;
-	fcport->drport = NULL;
-	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
-	if (rport) {
-		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
-		    "%s %8phN. rport %p roles %x\n",
-		    __func__, fcport->port_name, rport,
-		    rport->roles);
-
-		fc_remote_port_delete(rport);
-	}
-}
-
 void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
 {
 	int old_state;
@@ -5300,6 +5596,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
 	INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
 	INIT_LIST_HEAD(&fcport->gnl_entry);
 	INIT_LIST_HEAD(&fcport->list);
+	INIT_LIST_HEAD(&fcport->unsol_ctx_head);
 
 	INIT_LIST_HEAD(&fcport->sess_cmd_list);
 	spin_lock_init(&fcport->sess_cmd_lock);
@@ -5883,6 +6180,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 void
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
+	unsigned long flags;
+
 	if (IS_SW_RESV_ADDR(fcport->d_id))
 		return;
 
@@ -5892,7 +6191,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 	qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
 	fcport->login_retry = vha->hw->login_retry_count;
 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+	spin_lock_irqsave(&vha->work_lock, flags);
 	fcport->deleted = 0;
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+
 	if (vha->hw->current_topology == ISP_CFG_NL)
 		fcport->logout_on_delete = 0;
 	else
@@ -6013,7 +6316,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 	fc_port_t *fcport;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	uint16_t loop_id;
-	LIST_HEAD(new_fcports);
 	struct qla_hw_data *ha = vha->hw;
 	int discovery_gen;
 
@@ -6114,10 +6416,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 		qlt_do_generation_tick(vha, &discovery_gen);
 
 		if (USE_ASYNC_SCAN(ha)) {
-			rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
-			    NULL);
-			if (rval)
-				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			/* start of scan begins here */
+			vha->scan.rscn_gen_end = atomic_read(&vha->rscn_gen);
+			qla_fab_scan_start(vha);
 		} else {
 			list_for_each_entry(fcport, &vha->vp_fcports, list)
 				fcport->scan_state = QLA_FCPORT_SCAN;
@@ -6743,33 +7044,6 @@ int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
 	return rval;
 }
 
-void
-qla2x00_update_fcports(scsi_qla_host_t *base_vha)
-{
-	fc_port_t *fcport;
-	struct scsi_qla_host *vha, *tvp;
-	struct qla_hw_data *ha = base_vha->hw;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ha->vport_slock, flags);
-	/* Go with deferred removal of rport references. */
-	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
-		atomic_inc(&vha->vref_count);
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if (fcport->drport &&
-			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
-				spin_unlock_irqrestore(&ha->vport_slock, flags);
-				qla2x00_rport_del(fcport);
-
-				spin_lock_irqsave(&ha->vport_slock, flags);
-			}
-		}
-		atomic_dec(&vha->vref_count);
-		wake_up(&vha->vref_waitq);
-	}
-	spin_unlock_irqrestore(&ha->vport_slock, flags);
-}
-
 /* Assumes idc_lock always held on entry */
 void
 qla83xx_reset_ownership(scsi_qla_host_t *vha)
@@ -7158,14 +7432,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 	}
 
 	/* purge MBox commands */
-	if (atomic_read(&ha->num_pend_mbx_stage3)) {
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
 		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
 		complete(&ha->mbx_intr_comp);
 	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	i = 0;
-	while (atomic_read(&ha->num_pend_mbx_stage3) ||
-	    atomic_read(&ha->num_pend_mbx_stage2) ||
+	while (atomic_read(&ha->num_pend_mbx_stage2) ||
 	    atomic_read(&ha->num_pend_mbx_stage1)) {
 		msleep(20);
 		i++;
@@ -7243,12 +7518,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 int
 qla2x00_abort_isp(scsi_qla_host_t *vha)
 {
-	int rval;
 	uint8_t        status = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *vp, *tvp;
 	struct req_que *req = ha->req_q_map[0];
 	unsigned long flags;
+	fc_port_t *fcport;
 
 	if (vha->flags.online) {
 		qla2x00_abort_isp_cleanup(vha);
@@ -7317,6 +7592,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 				    "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
 				return status;
 			}
+
+			/* User may have updated [fcp|nvme] prefer in flash */
+			list_for_each_entry(fcport, &vha->vp_fcports, list) {
+				if (NVME_PRIORITY(ha, fcport))
+					fcport->do_prli_nvme = 1;
+				else
+					fcport->do_prli_nvme = 0;
+			}
+
 			if (!qla2x00_restart_isp(vha)) {
 				clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
@@ -7337,31 +7621,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 
 			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
 				qla2x00_get_fw_version(vha);
-			if (ha->fce) {
-				ha->flags.fce_enabled = 1;
-				memset(ha->fce, 0,
-				    fce_calc_size(ha->fce_bufs));
-				rval = qla2x00_enable_fce_trace(vha,
-				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
-				    &ha->fce_bufs);
-				if (rval) {
-					ql_log(ql_log_warn, vha, 0x8033,
-					    "Unable to reinitialize FCE "
-					    "(%d).\n", rval);
-					ha->flags.fce_enabled = 0;
-				}
-			}
-			if (ha->eft) {
-				memset(ha->eft, 0, EFT_SIZE);
-				rval = qla2x00_enable_eft_trace(vha,
-				    ha->eft_dma, EFT_NUM_BUFFERS);
-				if (rval) {
-					ql_log(ql_log_warn, vha, 0x8034,
-					    "Unable to reinitialize EFT "
-					    "(%d).\n", rval);
-				}
-			}
+
 		} else {	/* failed the ISP abort */
 			vha->flags.online = 1;
 			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
@@ -7411,6 +7671,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 			atomic_inc(&vp->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
+			/* User may have updated [fcp|nvme] prefer in flash */
+			list_for_each_entry(fcport, &vp->vp_fcports, list) {
+				if (NVME_PRIORITY(ha, fcport))
+					fcport->do_prli_nvme = 1;
+				else
+					fcport->do_prli_nvme = 0;
+			}
+
 			qla2x00_vp_abort_isp(vp);
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
@@ -7961,15 +8229,21 @@ qla28xx_get_aux_images(
 	struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
 	bool valid_pri_image = false, valid_sec_image = false;
 	bool active_pri_image = false, active_sec_image = false;
+	int rc;
 
 	if (!ha->flt_region_aux_img_status_pri) {
 		ql_dbg(ql_dbg_init, vha, 0x018a,
 		    "Primary aux image not addressed\n");
 		goto check_sec_image;
 	}
 
-	qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
+	rc = qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
 	    ha->flt_region_aux_img_status_pri,
 	    sizeof(pri_aux_image_status) >> 2);
+	if (rc) {
+		ql_log(ql_log_info, vha, 0x01a1,
+		    "Unable to read Primary aux image(%x).\n", rc);
+		goto check_sec_image;
+	}
 	qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
 
 	if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
@@ -8000,9 +8274,15 @@ check_sec_image:
 		goto check_valid_image;
 	}
 
-	qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
+	rc = qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
 	    ha->flt_region_aux_img_status_sec,
 	    sizeof(sec_aux_image_status) >> 2);
+	if (rc) {
+		ql_log(ql_log_info, vha, 0x01a2,
+		    "Unable to read Secondary aux image(%x).\n", rc);
+		goto check_valid_image;
+	}
+
 	qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
 
 	if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
@@ -8060,6 +8340,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
 	struct qla27xx_image_status pri_image_status, sec_image_status;
 	bool valid_pri_image = false, valid_sec_image = false;
 	bool active_pri_image = false, active_sec_image = false;
+	int rc;
 
 	if (!ha->flt_region_img_status_pri) {
 		ql_dbg(ql_dbg_init, vha, 0x018a,
 		    "Primary image not addressed\n");
@@ -8101,8 +8382,14 @@ check_sec_image:
 		goto check_valid_image;
 	}
 
-	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+	rc = qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
 	    ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
+	if (rc) {
+		ql_log(ql_log_info, vha, 0x01a3,
+		    "Unable to read Secondary image status(%x).\n", rc);
+		goto check_valid_image;
+	}
+
 	qla27xx_print_image(vha, "Secondary image", &sec_image_status);
 
 	if (qla27xx_check_image_status_signature(&sec_image_status)) {
@@ -8174,11 +8461,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 	    "FW: Loading firmware from flash (%x).\n", faddr);
 
 	dcode = (uint32_t *)req->ring;
-	qla24xx_read_flash_data(vha, dcode, faddr, 8);
-	if (qla24xx_risc_firmware_invalid(dcode)) {
+	rval = qla24xx_read_flash_data(vha, dcode, faddr, 8);
+	if (rval || qla24xx_risc_firmware_invalid(dcode)) {
 		ql_log(ql_log_fatal, vha, 0x008c,
-		    "Unable to verify the integrity of flash firmware "
-		    "image.\n");
+		    "Unable to verify the integrity of flash firmware image (rval %x).\n", rval);
 		ql_log(ql_log_fatal, vha, 0x008d,
 		    "Firmware data: %08x %08x %08x %08x.\n",
 		    dcode[0], dcode[1], dcode[2], dcode[3]);
@@ -8192,7 +8478,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 	for (j = 0; j < segments; j++) {
 		ql_dbg(ql_dbg_init, vha, 0x008d,
 		    "-> Loading segment %u...\n", j);
-		qla24xx_read_flash_data(vha, dcode, faddr, 10);
+		rval = qla24xx_read_flash_data(vha, dcode, faddr, 10);
+		if (rval) {
+			ql_log(ql_log_fatal, vha, 0x016a,
+			    "-> Unable to read segment addr + size .\n");
+			return QLA_FUNCTION_FAILED;
+		}
 		risc_addr = be32_to_cpu((__force __be32)dcode[2]);
 		risc_size = be32_to_cpu((__force __be32)dcode[3]);
 		if (!*srisc_addr) {
@@ -8208,7 +8499,13 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 			ql_dbg(ql_dbg_init, vha, 0x008e,
 			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
 			    fragment, risc_addr, faddr, dlen);
-			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
+			rval = qla24xx_read_flash_data(vha, dcode, faddr, dlen);
+			if (rval) {
+				ql_log(ql_log_fatal, vha, 0x016b,
+				    "-> Unable to read fragment(faddr %#x dlen %#lx).\n",
+				    faddr, dlen);
+				return QLA_FUNCTION_FAILED;
+			}
 			for (i = 0; i < dlen; i++)
 				dcode[i] = swab32(dcode[i]);
@@ -8237,7 +8534,14 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 		fwdt->length = 0;
 
 		dcode = (uint32_t *)req->ring;
-		qla24xx_read_flash_data(vha, dcode, faddr, 7);
+
+		rval = qla24xx_read_flash_data(vha, dcode, faddr, 7);
+		if (rval) {
+			ql_log(ql_log_fatal, vha, 0x016c,
+			    "-> Unable to read template size.\n");
+			goto failed;
+		}
+
 		risc_size = be32_to_cpu((__force __be32)dcode[2]);
 		ql_dbg(ql_dbg_init, vha, 0x0161,
 		    "-> fwdt%u template array at %#x (%#x dwords)\n",
@@ -8255,7 +8559,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 		ql_dbg(ql_dbg_init, vha, 0x0163,
 		    "-> fwdt%u template allocate template %#x words...\n",
 		    j, risc_size);
-		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
 		if (!fwdt->template) {
 			ql_log(ql_log_warn, vha, 0x0164,
 			    "-> fwdt%u failed allocate template.\n", j);
@@ -8263,11 +8567,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
 		}
 
 		dcode = fwdt->template;
-		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+		rval = qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
 
-		if (!qla27xx_fwdt_template_valid(dcode)) {
+		if (rval || !qla27xx_fwdt_template_valid(dcode)) {
 			ql_log(ql_log_warn, vha, 0x0165,
-			    "-> fwdt%u failed template validate\n", j);
+			    "-> fwdt%u failed template validate (rval %x)\n",
+			    j, rval);
 			goto failed;
 		}
@@ -8299,8 +8604,6 @@ failed:
 	return QLA_SUCCESS;
 }
 
-#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
-
 int
 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
@@ -8318,8 +8621,6 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 	if (!blob) {
 		ql_log(ql_log_info, vha, 0x0083,
 		    "Firmware image unavailable.\n");
-		ql_log(ql_log_info, vha, 0x0084,
-		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
 		return QLA_FUNCTION_FAILED;
 	}
 
@@ -8510,7 +8811,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 		ql_dbg(ql_dbg_init, vha, 0x0173,
 		    "-> fwdt%u template allocate template %#x words...\n",
 		    j, risc_size);
-		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+		fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
 		if (!fwdt->template) {
 			ql_log(ql_log_warn, vha, 0x0174,
 			    "-> fwdt%u failed allocate template.\n", j);
@@ -9411,6 +9712,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 		qpair->vp_idx = vp_idx;
 		qpair->fw_started = ha->flags.fw_started;
 		INIT_LIST_HEAD(&qpair->hints_list);
+		INIT_LIST_HEAD(&qpair->dsd_list);
 		qpair->chip_reset = ha->base_qpair->chip_reset;
 		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
 		qpair->enable_explicit_conf =
@@ -9461,8 +9763,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 		qpair->req = ha->req_q_map[req_id];
 		qpair->rsp->req = qpair->req;
 		qpair->rsp->qpair = qpair;
-		/* init qpair to this cpu. Will adjust at run time. */
-		qla_cpu_update(qpair, raw_smp_processor_id());
+
+		if (!qpair->cpu_mapped)
+			qla_cpu_update(qpair, raw_smp_processor_id());
 
 		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
 			if (ha->fw_attributes & BIT_4)
@@ -9477,6 +9780,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 			goto fail_mempool;
 		}
 
+		if (qla_create_buf_pool(vha, qpair)) {
+			ql_log(ql_log_warn, vha, 0xd036,
+			    "Failed to initialize buf pool for qpair %d\n",
+			    qpair->id);
+			goto fail_bufpool;
+		}
+
 		/* Mark as online */
 		qpair->online = 1;
@@ -9492,7 +9802,10 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 	}
 	return qpair;
 
+fail_bufpool:
+	mempool_destroy(qpair->srb_mempool);
 fail_mempool:
+	qla25xx_delete_req_que(vha, qpair->req);
 fail_req:
 	qla25xx_delete_rsp_que(vha, qpair->rsp);
 fail_rsp:
@@ -9518,6 +9831,8 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
 
 	qpair->delete_in_progress = 1;
 
+	qla_free_buf_pool(qpair);
+
 	ret = qla25xx_delete_req_que(vha, qpair->req);
 	if (ret != QLA_SUCCESS)
 		goto fail;
@@ -9526,6 +9841,19 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
 	if (ret != QLA_SUCCESS)
 		goto fail;
 
+	if (!list_empty(&qpair->dsd_list)) {
+		struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+		/* clean up allocated prev pool */
+		list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+					 &qpair->dsd_list, list) {
+			dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+				      dsd_ptr->dsd_list_dma);
+			list_del(&dsd_ptr->list);
+			kfree(dsd_ptr);
+		}
+	}
+
 	mutex_lock(&ha->mq_lock);
 	ha->queue_pair_map[qpair->id] = NULL;
 	clear_bit(qpair->id, ha->qpair_qid_map);
