author	James Smart <jsmart2021@gmail.com>	2019-01-28 11:14:21 -0800
committer	Martin K. Petersen <martin.petersen@oracle.com>	2019-02-05 22:22:42 -0500
commit	cdb42becdd40eeb320af3f21ac9a34e9d7517516 (patch)
tree	e72414894b82213f59141f78b83fcbbdfdf7bea2	/drivers/scsi/lpfc/lpfc_debugfs.h
parent	7370d10ac99e8ebc5501c0fcdec482cb939ecbd4 (diff)
scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu
Currently, nvme and fcp each have their own concept of an io_channel, which is a combined wq/cq pair and an associated msix vector. Different cpus would share an io_channel.

The driver is now moving to per-cpu wq/cq pairs and msix vectors. The driver will still use separate wq/cq pairs per protocol on each cpu, but the protocols will share the msix vector.

Given the elimination of the nvme and fcp io channels, the module parameters will be removed. A new parameter, lpfc_hdw_queue, is added which allows the wq/cq pair allocation per cpu to be overridden and allocated to a lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will be based on the number of cpus. If non-zero, the parameter specifies the number of queues to allocate. At this time, the maximum non-zero value is 64.

To manage this new paradigm, a new hardware queue structure is created to track queue activity and relationships.

As MSIX vector allocation must be known before setting up the relationships, msix allocation now occurs before queue data structures are allocated. If the number of vectors allocated is less than the desired hardware queues, the hardware queue counts will be reduced to the number of vectors.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
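
For orientation, the hdwq accesses in the diff below imply a per-cpu grouping along the lines of the following sketch. The *_sketch names are hypothetical stand-ins, not the driver's actual definitions; only the field names and the CQ-to-EQ lookup pattern are taken from this patch.

/* Illustrative only: minimal stand-ins for the driver's real types, kept
 * just complete enough to show the per-cpu hardware queue grouping that
 * the diff below indexes as phba->sli4_hba.hdwq[i]. The real structures
 * live in the driver's SLI-4 headers and carry far more state.
 */
struct lpfc_queue_sketch {
	unsigned int queue_id;		/* id of this queue */
	unsigned int assoc_qid;		/* id of the queue this one is bound to */
};

struct lpfc_hdwq_sketch {
	struct lpfc_queue_sketch *hba_eq;	/* event queue, one per hdw queue / MSI-X vector */
	struct lpfc_queue_sketch *fcp_cq;	/* FCP completion queue */
	struct lpfc_queue_sketch *fcp_wq;	/* FCP work queue */
	struct lpfc_queue_sketch *nvme_cq;	/* NVME completion queue */
	struct lpfc_queue_sketch *nvme_wq;	/* NVME work queue */
};

/* The CQ->EQ lookup that lpfc_debug_dump_cq() performs after this change,
 * written as a standalone helper: scan the hdwq array for the EQ the CQ is
 * associated with, falling back to entry 0 when no match is found.
 */
static struct lpfc_queue_sketch *
find_eq_for_cq_sketch(struct lpfc_hdwq_sketch *hdwq, int nr_hdwq,
		      struct lpfc_queue_sketch *cq)
{
	int eqidx;

	for (eqidx = 0; eqidx < nr_hdwq; eqidx++)
		if (cq->assoc_qid == hdwq[eqidx].hba_eq->queue_id)
			return hdwq[eqidx].hba_eq;
	return hdwq[0].hba_eq;	/* mirrors the "Using EQ[0]" fallback below */
}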
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_debugfs.h')
-rw-r--r--	drivers/scsi/lpfc/lpfc_debugfs.h	65
1 file changed, 33 insertions(+), 32 deletions(-)
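
Before the diff itself, a second hedged sketch of the queue-count decision the commit message describes; the helper name and every parameter name other than lpfc_hdw_queue are illustrative, not taken from the driver.

/* How the number of hardware queues resolves, per the commit message:
 * lpfc_hdw_queue == 0 means one wq/cq pair per cpu, a non-zero value
 * (capped at 64) requests that many, and the result is trimmed to the
 * MSI-X vectors actually granted.
 */
static int resolve_hdw_queue_count_sketch(int lpfc_hdw_queue, int online_cpus,
					  int msix_vectors_granted)
{
	int hdwq;

	if (lpfc_hdw_queue == 0)
		hdwq = online_cpus;
	else
		hdwq = (lpfc_hdw_queue > 64) ? 64 : lpfc_hdw_queue;

	if (msix_vectors_granted < hdwq)
		hdwq = msix_vectors_granted;	/* fewer vectors than desired queues */

	return hdwq;
}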
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 30efc7bf91bd..2c5bc494b247 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -410,10 +410,10 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr;
if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.fcp_wq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.nvme_wq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
@@ -454,14 +454,15 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
int eqidx;
/* fcp/nvme wq and cq are 1:1, thus same indexes */
+ eq = NULL;
if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.fcp_wq[wqidx];
- cq = phba->sli4_hba.fcp_cq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
qtypestr = "FCP";
} else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.nvme_wq[wqidx];
- cq = phba->sli4_hba.nvme_cq[wqidx];
+ wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
qtypestr = "NVME";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
@@ -478,17 +479,17 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
} else
return;
- for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
- if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
+ for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) {
+ eq = phba->sli4_hba.hdwq[eqidx].hba_eq;
+ if (cq->assoc_qid == eq->queue_id)
break;
}
- if (eqidx == phba->io_channel_irqs) {
+ if (eqidx == phba->cfg_hdw_queue) {
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
eqidx = 0;
+ eq = phba->sli4_hba.hdwq[0].hba_eq;
}
- eq = phba->sli4_hba.hba_eq[eqidx];
-
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
@@ -516,7 +517,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
{
struct lpfc_queue *qp;
- qp = phba->sli4_hba.hba_eq[qidx];
+ qp = phba->sli4_hba.hdwq[qidx].hba_eq;
pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
@@ -564,21 +565,21 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
{
int wq_idx;
- for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
- if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+ for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+ if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
break;
- if (wq_idx < phba->cfg_fcp_io_channel) {
+ if (wq_idx < phba->cfg_hdw_queue) {
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
return;
}
- for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
- if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+ for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
+ if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
break;
- if (wq_idx < phba->cfg_nvme_io_channel) {
+ if (wq_idx < phba->cfg_hdw_queue) {
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
return;
}
@@ -646,23 +647,23 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
{
int cq_idx;
- for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
- if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+ for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+ if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
break;
- if (cq_idx < phba->cfg_fcp_io_channel) {
+ if (cq_idx < phba->cfg_hdw_queue) {
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
return;
}
- for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
- if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+ for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
+ if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
break;
- if (cq_idx < phba->cfg_nvme_io_channel) {
+ if (cq_idx < phba->cfg_hdw_queue) {
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
return;
}
@@ -697,13 +698,13 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{
int eq_idx;
- for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
- if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
+ for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++)
+ if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
break;
- if (eq_idx < phba->io_channel_irqs) {
+ if (eq_idx < phba->cfg_hdw_queue) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq);
return;
}
}