Diffstat (limited to 'drivers/scsi/qla2xxx/qla_mid.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_mid.c | 736
1 file changed, 589 insertions(+), 147 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f868a9f98afe..0abc47e72e0b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
+ * Copyright (c) 2003-2014 QLogic Corporation
  */
 #include "qla_def.h"
 #include "qla_gbl.h"
@@ -21,7 +20,7 @@ void
 qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
 {
 	if (vha->vp_idx && vha->timer_active) {
-		del_timer_sync(&vha->timer);
+		timer_delete_sync(&vha->timer);
 		vha->timer_active = 0;
 	}
 }
@@ -50,11 +49,12 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
 	list_add_tail(&vha->list, &ha->vp_list);
-
-	qlt_update_vp_map(vha, SET_VP_IDX);
-
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qla_update_vp_map(vha, SET_VP_IDX);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	mutex_unlock(&ha->vport_lock);
 	return vp_id;
 }
@@ -65,6 +65,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 	uint16_t vp_id;
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;
+	u32 i, bailout;
 
 	mutex_lock(&ha->vport_lock);
 	/*
@@ -74,17 +75,29 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
 	 * ensures no active vp_list traversal while the vport is removed
 	 * from the queue)
 	 */
-	spin_lock_irqsave(&ha->vport_slock, flags);
-	while (atomic_read(&vha->vref_count)) {
+	bailout = 0;
+	for (i = 0; i < 500; i++) {
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		if (atomic_read(&vha->vref_count) == 0) {
+			list_del(&vha->list);
+			qla_update_vp_map(vha, RESET_VP_IDX);
+			bailout = 1;
+		}
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-		msleep(500);
-
+		if (bailout)
+			break;
+		else
+			msleep(20);
+	}
+	if (!bailout) {
+		ql_log(ql_log_info, vha, 0xfffa,
+		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
 		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_del(&vha->list);
+		qla_update_vp_map(vha, RESET_VP_IDX);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
 	}
-	list_del(&vha->list);
-	qlt_update_vp_map(vha, RESET_VP_IDX);
-	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	vp_id = vha->vp_idx;
 	ha->num_vhosts--;
@@ -141,7 +154,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
 		    "Marking port dead, loop_id=0x%04x : %x.\n",
 		    fcport->loop_id, fcport->vha->vp_idx);
 
-		qla2x00_mark_device_lost(vha, fcport, 0, 0);
+		qla2x00_mark_device_lost(vha, fcport, 0);
 		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
 	}
 }
@@ -150,16 +163,32 @@ int
 qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
 	unsigned long flags;
-	int ret;
+	int ret = QLA_SUCCESS;
+	fc_port_t *fcport;
+
+	if (vha->hw->flags.edif_enabled) {
+		if (DBELL_ACTIVE(vha))
+			qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
+			    FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
+		/* delete sessions and flush sa_indexes */
+		qla2x00_wait_for_sess_deletion(vha);
+	}
+
+	if (vha->hw->flags.fw_started)
+		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
-	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->logout_on_delete = 1;
+
+	if (!vha->hw->flags.edif_enabled)
+		qla2x00_wait_for_sess_deletion(vha);
 
 	/* Remove port id from vp target map */
-	spin_lock_irqsave(&vha->hw->vport_slock, flags);
-	qlt_update_vp_map(vha, RESET_AL_PA);
-	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	qla_update_vp_map(vha, RESET_AL_PA);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 	qla2x00_mark_vp_devices_dead(vha);
 	atomic_set(&vha->vp_state, VP_FAILED);
@@ -186,6 +215,11 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
 	    !(ha->current_topology & ISP_CFG_F)) {
 		vha->vp_err_state = VP_ERR_PORTDWN;
 		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+		ql_dbg(ql_dbg_taskm, vha, 0x800b,
+		    "%s skip enable. loop_state %x topo %x\n",
+		    __func__, base_vha->loop_state.counter,
+		    ha->current_topology);
+
 		goto enable_failed;
 	}
 
@@ -240,14 +274,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
 void
 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 {
-	scsi_qla_host_t *vha;
+	scsi_qla_host_t *vha, *tvp;
 	struct qla_hw_data *ha = rsp->hw;
 	int i = 0;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
-	list_for_each_entry(vha, &ha->vp_list, list) {
+	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
 		if (vha->vp_idx) {
+			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
+				continue;
+
 			atomic_inc(&vha->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -258,17 +295,25 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 			case MBA_LIP_RESET:
 			case MBA_POINT_TO_POINT:
 			case MBA_CHG_IN_CONNECTION:
-			case MBA_PORT_UPDATE:
-			case MBA_RSCN_UPDATE:
 				ql_dbg(ql_dbg_async, vha, 0x5024,
 				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
 				    i, *mb, vha);
 				qla2x00_async_event(vha, rsp, mb);
 				break;
+			case MBA_PORT_UPDATE:
+			case MBA_RSCN_UPDATE:
+				if ((mb[3] & 0xff) == vha->vp_idx) {
+					ql_dbg(ql_dbg_async, vha, 0x5024,
+					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
+					    i, *mb, vha);
+					qla2x00_async_event(vha, rsp, mb);
+				}
+				break;
 			}
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vha->vref_count);
+			wake_up(&vha->vref_waitq);
 		}
 		i++;
 	}
@@ -278,67 +323,80 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 int
 qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 {
+	fc_port_t *fcport;
+
+	/*
+	 * To exclusively reset vport, we need to log it out first.
+	 * Note: This control_vp can fail if ISP reset is already
+	 * issued, this is expected, as the vp would be already
+	 * logged out due to ISP reset.
+	 */
+	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+		list_for_each_entry(fcport, &vha->vp_fcports, list)
+			fcport->logout_on_delete = 0;
+	}
+
 	/*
 	 * Physical port will do most of the abort and recovery work. We can
 	 * just treat it as a loop down
 	 */
 	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
-		qla2x00_mark_all_devices_lost(vha, 0);
+		qla2x00_mark_all_devices_lost(vha);
 	} else {
 		if (!atomic_read(&vha->loop_down_timer))
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	}
 
-	/*
-	 * To exclusively reset vport, we need to log it out first. Note: this
-	 * control_vp can fail if ISP reset is already issued, this is
-	 * expected, as the vp would be already logged out due to ISP reset.
-	 */
-	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
-		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
-
 	ql_dbg(ql_dbg_taskm, vha, 0x801d,
 	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
+
 	return qla24xx_enable_vp(vha);
 }
 
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
 	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
 	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
 
-	qla2x00_do_work(vha);
-
-	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
-		/* VP acquired. complete port configuration */
-		ql_dbg(ql_dbg_dpc, vha, 0x4014,
-		    "Configure VP scheduled.\n");
-		qla24xx_configure_vp(vha);
-		ql_dbg(ql_dbg_dpc, vha, 0x4015,
-		    "Configure VP end.\n");
-		return 0;
+	/* Check if Fw is ready to configure VP first */
+	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
+		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+			/* VP acquired. complete port configuration */
+			ql_dbg(ql_dbg_dpc, vha, 0x4014,
+			    "Configure VP scheduled.\n");
+			qla24xx_configure_vp(vha);
+			ql_dbg(ql_dbg_dpc, vha, 0x4015,
+			    "Configure VP end.\n");
			return 0;
+		}
 	}
 
-	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
-		ql_dbg(ql_dbg_dpc, vha, 0x4016,
-		    "FCPort update scheduled.\n");
-		qla2x00_update_fcports(vha);
-		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
-		ql_dbg(ql_dbg_dpc, vha, 0x4017,
-		    "FCPort update end.\n");
+	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
+		if (atomic_read(&vha->loop_state) == LOOP_READY) {
+			qla24xx_process_purex_list(&vha->purex_list);
+			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
+		}
 	}
 
-	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
-	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
-	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
+	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
 
-		ql_dbg(ql_dbg_dpc, vha, 0x4018,
-		    "Relogin needed scheduled.\n");
-		qla2x00_relogin(vha);
-		ql_dbg(ql_dbg_dpc, vha, 0x4019,
-		    "Relogin needed end.\n");
+		if (!vha->relogin_jif ||
+		    time_after_eq(jiffies, vha->relogin_jif)) {
+			vha->relogin_jif = jiffies + HZ;
+			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+			ql_dbg(ql_dbg_dpc, vha, 0x4018,
+			    "Relogin needed scheduled.\n");
+			qla24xx_post_relogin_work(vha);
+		}
 	}
 
 	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -365,9 +423,8 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 void
 qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 {
-	int ret;
 	struct qla_hw_data *ha = vha->hw;
-	scsi_qla_host_t *vp;
+	scsi_qla_host_t *vp, *tvp;
 	unsigned long flags = 0;
 
 	if (vha->vp_idx)
@@ -381,12 +438,12 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 		return;
 
 	spin_lock_irqsave(&ha->vport_slock, flags);
-	list_for_each_entry(vp, &ha->vp_list, list) {
+	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
 		if (vp->vp_idx) {
 			atomic_inc(&vp->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-			ret = qla2x00_do_dpc_vp(vp);
+			qla2x00_do_dpc_vp(vp);
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vp->vref_count);
@@ -439,7 +496,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
 	struct qla_hw_data *ha = base_vha->hw;
 	scsi_qla_host_t *vha;
-	struct scsi_host_template *sht = &qla2xxx_driver_template;
+	const struct scsi_host_template *sht = &qla2xxx_driver_template;
 	struct Scsi_Host *host;
 
 	vha = qla2x00_create_host(sht, ha);
@@ -449,6 +506,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 		return(NULL);
 	}
 
+	vha->irq_offset = QLA_BASE_VECTORS;
 	host = vha->host;
 	fc_vport->dd_data = vha;
 	/* New host info */
@@ -463,9 +521,12 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 		    "Couldn't allocate vp_id.\n");
 		goto create_vhost_failed;
 	}
-	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
 
 	vha->dpc_flags = 0L;
+	ha->dpc_active = 0;
+	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
 
 	/*
 	 * To fix the issue of processing a parent's RSCN for the vport before
@@ -475,9 +536,10 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 
-	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+	qla2x00_start_timer(vha, WATCH_INTERVAL);
 
 	vha->req = base_vha->req;
+	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
 	host->can_queue = base_vha->req->length + 128;
 	host->cmd_per_lun = 3;
 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -525,7 +587,6 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	}
 	kfree(req->outstanding_cmds);
 	kfree(req);
-	req = NULL;
 }
 
 static void
@@ -535,9 +596,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	uint16_t que_id = rsp->id;
 
 	if (rsp->msix && rsp->msix->have_irq) {
-		free_irq(rsp->msix->vector, rsp);
+		free_irq(rsp->msix->vector, rsp->msix->handle);
 		rsp->msix->have_irq = 0;
-		rsp->msix->rsp = NULL;
+		rsp->msix->in_use = 0;
+		rsp->msix->handle = NULL;
 	}
 	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
 	    sizeof(response_t), rsp->ring, rsp->dma);
@@ -550,35 +612,38 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 		mutex_unlock(&ha->vport_lock);
 	}
 	kfree(rsp);
-	rsp = NULL;
 }
 
 int
 qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
 {
-	int ret = -1;
+	int ret = QLA_SUCCESS;
 
-	if (req) {
+	if (req && vha->flags.qpairs_req_created) {
 		req->options |= BIT_0;
 		ret = qla25xx_init_req_que(vha, req);
-	}
-	if (ret == QLA_SUCCESS)
+		if (ret != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+
 		qla25xx_free_req_que(vha, req);
+	}
 
 	return ret;
 }
 
-static int
+int
 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 {
-	int ret = -1;
+	int ret = QLA_SUCCESS;
 
-	if (rsp) {
+	if (rsp && vha->flags.qpairs_rsp_created) {
 		rsp->options |= BIT_0;
 		ret = qla25xx_init_rsp_que(vha, rsp);
-	}
-	if (ret == QLA_SUCCESS)
+		if (ret != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+
 		qla25xx_free_rsp_que(vha, rsp);
+	}
 
 	return ret;
 }
@@ -591,46 +656,55 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
 	struct req_que *req = NULL;
 	struct rsp_que *rsp = NULL;
 	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair, *tqpair;
 
-	/* Delete request queues */
-	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
-		req = ha->req_q_map[cnt];
-		if (req) {
-			ret = qla25xx_delete_req_que(vha, req);
-			if (ret != QLA_SUCCESS) {
-				ql_log(ql_log_warn, vha, 0x00ea,
-				    "Couldn't delete req que %d.\n",
-				    req->id);
-				return ret;
+	if (ql2xmqsupport || ql2xnvmeenable) {
+		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+		    qp_list_elem)
+			qla2xxx_delete_qpair(vha, qpair);
+	} else {
+		/* Delete request queues */
+		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+			req = ha->req_q_map[cnt];
+			if (req && test_bit(cnt, ha->req_qid_map)) {
+				ret = qla25xx_delete_req_que(vha, req);
+				if (ret != QLA_SUCCESS) {
+					ql_log(ql_log_warn, vha, 0x00ea,
+					    "Couldn't delete req que %d.\n",
+					    req->id);
+					return ret;
+				}
 			}
 		}
-	}
 
-	/* Delete response queues */
-	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
-		rsp = ha->rsp_q_map[cnt];
-		if (rsp) {
-			ret = qla25xx_delete_rsp_que(vha, rsp);
-			if (ret != QLA_SUCCESS) {
-				ql_log(ql_log_warn, vha, 0x00eb,
-				    "Couldn't delete rsp que %d.\n",
-				    rsp->id);
-				return ret;
+		/* Delete response queues */
+		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+			rsp = ha->rsp_q_map[cnt];
+			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+				ret = qla25xx_delete_rsp_que(vha, rsp);
+				if (ret != QLA_SUCCESS) {
+					ql_log(ql_log_warn, vha, 0x00eb,
+					    "Couldn't delete rsp que %d.\n",
+					    rsp->id);
+					return ret;
+				}
 			}
 		}
 	}
+
 	return ret;
 }
 
 int
 qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
-	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
+	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
 {
 	int ret = 0;
 	struct req_que *req = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
 	uint16_t que_id = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 	uint32_t cnt;
 
 	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
@@ -654,10 +728,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 	if (ret != QLA_SUCCESS)
 		goto que_failed;
 
-	mutex_lock(&ha->vport_lock);
+	mutex_lock(&ha->mq_lock);
 	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
 	if (que_id >= ha->max_req_queues) {
-		mutex_unlock(&ha->vport_lock);
+		mutex_unlock(&ha->mq_lock);
 		ql_log(ql_log_warn, base_vha, 0x00db,
 		    "No resources to create additional request queue.\n");
 		goto que_failed;
@@ -699,8 +773,11 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 	req->cnt = req->length;
 	req->id = que_id;
 	reg = ISP_QUE_REG(ha, que_id);
+	req->req_q_in = &reg->isp25mq.req_q_in;
+	req->req_q_out = &reg->isp25mq.req_q_out;
 	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
-	mutex_unlock(&ha->vport_lock);
+	req->out_ptr = (uint16_t *)(req->ring + req->length);
+	mutex_unlock(&ha->mq_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
 	    "ring_ptr=%p ring_index=%d, "
 	    "cnt=%d id=%d max_q_depth=%d.\n",
@@ -712,14 +789,17 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 	    req->ring_ptr, req->ring_index, req->cnt,
 	    req->id, req->max_q_depth);
 
-	ret = qla25xx_init_req_que(base_vha, req);
-	if (ret != QLA_SUCCESS) {
-		ql_log(ql_log_fatal, base_vha, 0x00df,
-		    "%s failed.\n", __func__);
-		mutex_lock(&ha->vport_lock);
-		clear_bit(que_id, ha->req_qid_map);
-		mutex_unlock(&ha->vport_lock);
-		goto que_failed;
+	if (startqp) {
+		ret = qla25xx_init_req_que(base_vha, req);
+		if (ret != QLA_SUCCESS) {
+			ql_log(ql_log_fatal, base_vha, 0x00df,
+			    "%s failed.\n", __func__);
+			mutex_lock(&ha->mq_lock);
+			clear_bit(que_id, ha->req_qid_map);
+			mutex_unlock(&ha->mq_lock);
+			goto que_failed;
+		}
+		vha->flags.qpairs_req_created = 1;
 	}
 
 	return req->id;
@@ -733,26 +813,26 @@ failed:
 static void qla_do_work(struct work_struct *work)
 {
 	unsigned long flags;
-	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
-	struct scsi_qla_host *vha;
-	struct qla_hw_data *ha = rsp->hw;
+	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
+	struct scsi_qla_host *vha = qpair->vha;
+
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+	qla24xx_process_response_queue(vha, qpair->rsp);
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 
-	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
-	vha = pci_get_drvdata(ha->pdev);
-	qla24xx_process_response_queue(vha, rsp);
-	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
 }
 
 /* create response queue */
 int
 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
-	uint8_t vp_idx, uint16_t rid, int req)
+	uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
 {
 	int ret = 0;
 	struct rsp_que *rsp = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
 	uint16_t que_id = 0;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 
 	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
 	if (rsp == NULL) {
@@ -771,28 +851,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 		goto que_failed;
 	}
 
-	mutex_lock(&ha->vport_lock);
+	mutex_lock(&ha->mq_lock);
 	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
 	if (que_id >= ha->max_rsp_queues) {
-		mutex_unlock(&ha->vport_lock);
+		mutex_unlock(&ha->mq_lock);
 		ql_log(ql_log_warn, base_vha, 0x00e2,
 		    "No resources to create additional request queue.\n");
 		goto que_failed;
 	}
 	set_bit(que_id, ha->rsp_qid_map);
 
-	if (ha->flags.msix_enabled)
-		rsp->msix = &ha->msix_entries[que_id + 1];
-	else
-		ql_log(ql_log_warn, base_vha, 0x00e3,
-		    "MSIX not enalbled.\n");
+	rsp->msix = qpair->msix;
 
 	ha->rsp_q_map[que_id] = rsp;
 	rsp->rid = rid;
 	rsp->vp_idx = vp_idx;
 	rsp->hw = ha;
 	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
-	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
 	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
 	/* Use alternate PCI bus number */
 	if (MSB(rsp->rid))
@@ -804,42 +880,46 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	if (!IS_MSIX_NACK_CAPABLE(ha))
 		options |= BIT_6;
 
+	/* Set option to indicate response queue creation */
+	options |= BIT_1;
+
 	rsp->options = options;
 	rsp->id = que_id;
 	reg = ISP_QUE_REG(ha, que_id);
 	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
 	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
-	mutex_unlock(&ha->vport_lock);
+	rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
+	mutex_unlock(&ha->mq_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
-	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
 	    rsp->options, rsp->id, rsp->rsp_q_in,
 	    rsp->rsp_q_out);
 	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
-	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
 	    rsp->options, rsp->id, rsp->rsp_q_in,
 	    rsp->rsp_q_out);
 
-	ret = qla25xx_request_irq(rsp);
+	ret = qla25xx_request_irq(ha, qpair, qpair->msix);
 	if (ret)
 		goto que_failed;
 
-	ret = qla25xx_init_rsp_que(base_vha, rsp);
-	if (ret != QLA_SUCCESS) {
-		ql_log(ql_log_fatal, base_vha, 0x00e7,
-		    "%s failed.\n", __func__);
-		mutex_lock(&ha->vport_lock);
-		clear_bit(que_id, ha->rsp_qid_map);
-		mutex_unlock(&ha->vport_lock);
-		goto que_failed;
+	if (startqp) {
+		ret = qla25xx_init_rsp_que(base_vha, rsp);
+		if (ret != QLA_SUCCESS) {
+			ql_log(ql_log_fatal, base_vha, 0x00e7,
+			    "%s failed.\n", __func__);
+			mutex_lock(&ha->mq_lock);
+			clear_bit(que_id, ha->rsp_qid_map);
+			mutex_unlock(&ha->mq_lock);
+			goto que_failed;
+		}
+		vha->flags.qpairs_rsp_created = 1;
 	}
-	if (req >= 0)
-		rsp->req = ha->req_q_map[req];
-	else
-		rsp->req = NULL;
+	rsp->req = NULL;
 	qla2x00_init_response_q_entries(rsp);
-	if (rsp->hw->wq)
-		INIT_WORK(&rsp->q_work, qla_do_work);
+	if (qpair->hw->wq)
+		INIT_WORK(&qpair->q_work, qla_do_work);
 
 	return rsp->id;
 
 que_failed:
@@ -847,3 +927,365 @@ que_failed:
 failed:
 	return 0;
 }
+
+static void qla_ctrlvp_sp_done(srb_t *sp, int res)
+{
+	if (sp->comp)
+		complete(sp->comp);
+	/* don't free sp here. Let the caller do the free */
+}
+
+/**
+ * qla24xx_control_vp() - Enable a virtual port for given host
+ * @vha: adapter block pointer
+ * @cmd: command type to be sent for enable virtual port
+ *
+ * Return: qla2xxx local function return status code.
+ */
+int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+	int rval = QLA_MEMORY_ALLOC_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	int vp_index = vha->vp_idx;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	DECLARE_COMPLETION_ONSTACK(comp);
+	srb_t *sp;
+
+	ql_dbg(ql_dbg_vport, vha, 0x10c1,
+	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
+
+	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+		return QLA_PARAMETER_ERROR;
+
+	/* ref: INIT */
+	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
+	if (!sp)
+		return rval;
+
+	sp->type = SRB_CTRL_VP;
+	sp->name = "ctrl_vp";
+	sp->comp = &comp;
+	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+			      qla_ctrlvp_sp_done);
+	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
+	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_async, vha, 0xffff,
+		    "%s: %s Failed submission. %x.\n",
+		    __func__, sp->name, rval);
+		goto done;
+	}
+
+	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
+	    sp->name, sp->handle);
+
+	wait_for_completion(&comp);
+	sp->comp = NULL;
+
+	rval = sp->rc;
+	switch (rval) {
+	case QLA_FUNCTION_TIMEOUT:
+		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
+		    __func__, sp->name, rval);
+		break;
+	case QLA_SUCCESS:
+		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
+		    __func__, sp->name);
+		break;
+	default:
+		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
+		    __func__, sp->name, rval);
+		break;
+	}
+done:
+	/* ref: INIT */
+	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+	return rval;
+}
+
+struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->vp_idx == vp_idx)
+		return vha;
+
+	BUG_ON(ha->vp_map == NULL);
+	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+		return ha->vp_map[vp_idx].vha;
+
+	return NULL;
+}
+
+/* vport_slock to be held by the caller */
+void
+qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+	void *slot;
+	u32 key;
+	int rc;
+
+	if (!vha->hw->vp_map)
+		return;
+
+	key = vha->d_id.b24;
+
+	switch (cmd) {
+	case SET_VP_IDX:
+		vha->hw->vp_map[vha->vp_idx].vha = vha;
+		break;
+	case SET_AL_PA:
+		slot = btree_lookup32(&vha->hw->host_map, key);
+		if (!slot) {
+			ql_dbg(ql_dbg_disc, vha, 0xf018,
+			    "Save vha in host_map %p %06x\n", vha, key);
+			rc = btree_insert32(&vha->hw->host_map,
+			    key, vha, GFP_ATOMIC);
+			if (rc)
+				ql_log(ql_log_info, vha, 0xd03e,
+				    "Unable to insert s_id into host_map: %06x\n",
+				    key);
+			return;
+		}
+		ql_dbg(ql_dbg_disc, vha, 0xf019,
+		    "replace existing vha in host_map %p %06x\n", vha, key);
+		btree_update32(&vha->hw->host_map, key, vha);
+		break;
+	case RESET_VP_IDX:
+		vha->hw->vp_map[vha->vp_idx].vha = NULL;
+		break;
+	case RESET_AL_PA:
+		ql_dbg(ql_dbg_disc, vha, 0xf01a,
+		    "clear vha in host_map %p %06x\n", vha, key);
+		slot = btree_lookup32(&vha->hw->host_map, key);
+		if (slot)
+			btree_remove32(&vha->hw->host_map, key);
+		vha->d_id.b24 = 0;
+		break;
+	}
+}
+
+void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+
+	if (!vha->d_id.b24) {
+		vha->d_id = id;
+		qla_update_vp_map(vha, SET_AL_PA);
+	} else if (vha->d_id.b24 != id.b24) {
+		qla_update_vp_map(vha, RESET_AL_PA);
+		vha->d_id = id;
+		qla_update_vp_map(vha, SET_AL_PA);
+	}
+}
+
+int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp)
+{
+	int sz;
+
+	qp->buf_pool.num_bufs = qp->req->length;
+
+	sz = BITS_TO_LONGS(qp->req->length);
+	qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL);
+	if (!qp->buf_pool.buf_map) {
+		ql_log(ql_log_warn, vha, 0x0186,
+		    "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long));
+		return -ENOMEM;
+	}
+	sz = qp->req->length * sizeof(void *);
+	qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL);
+	if (!qp->buf_pool.buf_array) {
+		ql_log(ql_log_warn, vha, 0x0186,
+		    "Failed to allocate buf_array(%d).\n", sz);
+		kfree(qp->buf_pool.buf_map);
+		return -ENOMEM;
+	}
+	sz = qp->req->length * sizeof(dma_addr_t);
+	qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL);
+	if (!qp->buf_pool.dma_array) {
+		ql_log(ql_log_warn, vha, 0x0186,
+		    "Failed to allocate dma_array(%d).\n", sz);
+		kfree(qp->buf_pool.buf_map);
+		kfree(qp->buf_pool.buf_array);
+		return -ENOMEM;
+	}
+	set_bit(0, qp->buf_pool.buf_map);
+	return 0;
+}
+
+void qla_free_buf_pool(struct qla_qpair *qp)
+{
+	int i;
+	struct qla_hw_data *ha = qp->vha->hw;
+
+	for (i = 0; i < qp->buf_pool.num_bufs; i++) {
+		if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i])
+			dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i],
+			    qp->buf_pool.dma_array[i]);
+		qp->buf_pool.buf_array[i] = NULL;
+		qp->buf_pool.dma_array[i] = 0;
+	}
+
+	kfree(qp->buf_pool.dma_array);
+	kfree(qp->buf_pool.buf_array);
+	kfree(qp->buf_pool.buf_map);
+}
+
+/* it is assume qp->qp_lock is held at this point */
+int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc)
+{
+	u16 tag, i = 0;
+	void *buf;
+	dma_addr_t buf_dma;
+	struct qla_hw_data *ha = vha->hw;
+
+	dsc->tag = TAG_FREED;
+again:
+	tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs);
+	if (tag >= qp->buf_pool.num_bufs) {
+		ql_dbg(ql_dbg_io, vha, 0x00e2,
+		    "qp(%d) ran out of buf resource.\n", qp->id);
+		return -EIO;
+	}
+	if (tag == 0) {
+		set_bit(0, qp->buf_pool.buf_map);
+		i++;
+		if (i == 5) {
+			ql_dbg(ql_dbg_io, vha, 0x00e3,
+			    "qp(%d) unable to get tag.\n", qp->id);
+			return -EIO;
+		}
+		goto again;
+	}
+
+	if (!qp->buf_pool.buf_array[tag]) {
+		buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma);
+		if (!buf) {
+			ql_log(ql_log_fatal, vha, 0x13b1,
+			    "Failed to allocate buf.\n");
+			return -ENOMEM;
+		}
+
+		dsc->buf = qp->buf_pool.buf_array[tag] = buf;
+		dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma;
+		qp->buf_pool.num_alloc++;
+	} else {
+		dsc->buf = qp->buf_pool.buf_array[tag];
+		dsc->buf_dma = qp->buf_pool.dma_array[tag];
+		memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE);
+	}
+
+	qp->buf_pool.num_active++;
+	if (qp->buf_pool.num_active > qp->buf_pool.max_used)
+		qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+	dsc->tag = tag;
+	set_bit(tag, qp->buf_pool.buf_map);
+	return 0;
+}
+
+static void qla_trim_buf(struct qla_qpair *qp, u16 trim)
+{
+	int i, j;
+	struct qla_hw_data *ha = qp->vha->hw;
+
+	if (!trim)
+		return;
+
+	for (i = 0; i < trim; i++) {
+		j = qp->buf_pool.num_alloc - 1;
+		if (test_bit(j, qp->buf_pool.buf_map)) {
+			ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
+			       "QP id(%d): trim active buf[%d]. Remain %d bufs\n",
+			       qp->id, j, qp->buf_pool.num_alloc);
+			return;
+		}
+
+		if (qp->buf_pool.buf_array[j]) {
+			dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j],
+			    qp->buf_pool.dma_array[j]);
+			qp->buf_pool.buf_array[j] = NULL;
+			qp->buf_pool.dma_array[j] = 0;
+		}
+		qp->buf_pool.num_alloc--;
+		if (!qp->buf_pool.num_alloc)
+			break;
+	}
+	ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
+	       "QP id(%d): trimmed %d bufs. Remain %d bufs\n",
+	       qp->id, trim, qp->buf_pool.num_alloc);
+}
+
+static void __qla_adjust_buf(struct qla_qpair *qp)
+{
+	u32 trim;
+
+	qp->buf_pool.take_snapshot = 0;
+	qp->buf_pool.prev_max = qp->buf_pool.max_used;
+	qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+	if (qp->buf_pool.prev_max > qp->buf_pool.max_used &&
+	    qp->buf_pool.num_alloc > qp->buf_pool.max_used) {
+		/* down trend */
+		trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used;
+		trim = (trim * 10) / 100;
+		trim = trim ? trim : 1;
+		qla_trim_buf(qp, trim);
+	} else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) {
+		/* 2 periods of no io */
+		qla_trim_buf(qp, qp->buf_pool.num_alloc);
+	}
+}
+
+/* it is assume qp->qp_lock is held at this point */
+void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc)
+{
+	if (dsc->tag == TAG_FREED)
+		return;
+	lockdep_assert_held(qp->qp_lock_ptr);
+
+	clear_bit(dsc->tag, qp->buf_pool.buf_map);
+	qp->buf_pool.num_active--;
+	dsc->tag = TAG_FREED;
+
+	if (qp->buf_pool.take_snapshot)
+		__qla_adjust_buf(qp);
+}
+
+#define EXPIRE (60 * HZ)
+void qla_adjust_buf(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+	int i;
+	struct qla_qpair *qp;
+
+	if (vha->vp_idx)
+		return;
+
+	if (!vha->buf_expired) {
+		vha->buf_expired = jiffies + EXPIRE;
+		return;
+	}
+	if (time_before(jiffies, vha->buf_expired))
+		return;
+
+	vha->buf_expired = jiffies + EXPIRE;
+
+	for (i = 0; i < vha->hw->num_qpairs; i++) {
+		qp = vha->hw->queue_pair_map[i];
+		if (!qp)
+			continue;
+		if (!qp->buf_pool.num_alloc)
+			continue;
+
+		if (qp->buf_pool.take_snapshot) {
+			/* no io has gone through in the last EXPIRE period */
+			spin_lock_irqsave(qp->qp_lock_ptr, flags);
+			__qla_adjust_buf(qp);
+			spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+		} else {
+			qp->buf_pool.take_snapshot = 1;
+		}
+	}
+}
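
The last hunk above adds a per-queue-pair DMA buffer pool (qla_create_buf_pool(), qla_get_buf(), qla_put_buf()) that hands out FCP_CMND-sized buffers under the queue pair's lock. Below is a minimal caller sketch, not part of the patch: it assumes the qla2xxx internal types from this diff (struct scsi_qla_host, struct qla_qpair, struct qla_buf_dsc, TAG_FREED) are in scope, and the helper name qla_example_use_buf() is hypothetical, shown only to illustrate the expected get/use/put pattern.

/*
 * Hypothetical usage sketch for the buffer pool introduced in this diff.
 * qla_get_buf() and qla_put_buf() both expect qp->qp_lock_ptr to be held,
 * as documented in the comments above those functions.
 */
static int qla_example_use_buf(struct scsi_qla_host *vha, struct qla_qpair *qp)
{
	struct qla_buf_dsc dsc;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	rc = qla_get_buf(vha, qp, &dsc);	/* reserves a tag and a DMA buffer */
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
	if (rc)
		return rc;

	/* dsc.buf / dsc.buf_dma now describe a zeroed FCP_CMND DMA buffer */

	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	qla_put_buf(qp, &dsc);			/* returns the tag to the pool */
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
	return 0;
}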
