Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/qplib_fp.c')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 895
1 file changed, 585 insertions(+), 310 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 96e581ced50e..c88f049136fc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -54,6 +54,10 @@
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
+#include <rdma/ib_addr.h>
+#include "bnxt_ulp.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
@@ -237,18 +241,15 @@ static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_hwq *hwq = &nq->hwq;
struct nq_base *nqe, **nq_ptr;
int budget = nq->budget;
- u32 sw_cons, raw_cons;
uintptr_t q_handle;
u16 type;
spin_lock_bh(&hwq->lock);
/* Service the NQ until empty */
- raw_cons = hwq->cons;
while (budget--) {
- sw_cons = HWQ_CMP(raw_cons, hwq);
nq_ptr = (struct nq_base **)hwq->pbl_ptr;
- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
+ nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
break;
/*
@@ -276,7 +277,8 @@ static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
default:
break;
}
- raw_cons++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
}
spin_unlock_bh(&hwq->lock);
}
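
The consumer-side rewrite above is the pattern repeated throughout this patch: the raw_cons/HWQ_CMP() cursor is dropped, and validity is judged against an epoch bit kept in dbinfo.flags, which flips each time the consumer index wraps. A minimal sketch of the increment helper being called (the real definition lives in qplib_res.h; the flag-bit name is an assumption here):

#define BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT	0	/* assumed bit position */

static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons,
					    u32 cnt, u32 *dbinfo_flags)
{
	*cons += cnt;
	if (*cons >= max_elements) {
		*cons %= max_elements;
		/* wrapped around: flip the epoch so the valid test inverts */
		*dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
	}
}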
@@ -300,22 +302,18 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- int num_srqne_processed = 0;
- int num_cqne_processed = 0;
struct bnxt_qplib_cq *cq;
int budget = nq->budget;
- u32 sw_cons, raw_cons;
struct nq_base *nqe;
uintptr_t q_handle;
+ u32 hw_polled = 0;
u16 type;
spin_lock_bh(&hwq->lock);
/* Service the NQ until empty */
- raw_cons = hwq->cons;
while (budget--) {
- sw_cons = HWQ_CMP(raw_cons, hwq);
- nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
+ nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
break;
/*
@@ -329,6 +327,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_CQ_NOTIFICATION:
{
struct nq_cn *nqcne = (struct nq_cn *)nqe;
+ struct bnxt_re_cq *cq_p;
q_handle = le32_to_cpu(nqcne->cq_handle_low);
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
@@ -336,13 +335,18 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
if (!cq)
break;
+ cq->toggle = (le16_to_cpu(nqe->info10_type) &
+ NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
+ cq->dbinfo.toggle = cq->toggle;
+ cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
+ if (cq_p->uctx_cq_page)
+ *((u32 *)cq_p->uctx_cq_page) = cq->toggle;
+
bnxt_qplib_armen_db(&cq->dbinfo,
DBC_DBC_TYPE_CQ_ARMENA);
spin_lock_bh(&cq->compl_lock);
atomic_set(&cq->arm_state, 0);
- if (!nq->cqn_handler(nq, (cq)))
- num_cqne_processed++;
- else
+ if (nq->cqn_handler(nq, (cq)))
dev_warn(&nq->pdev->dev,
"cqn - type 0x%x not handled\n", type);
cq->cnq_events++;
@@ -352,6 +356,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_SRQ_EVENT:
{
struct bnxt_qplib_srq *srq;
+ struct bnxt_re_srq *srq_p;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
@@ -359,13 +364,17 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
srq = (struct bnxt_qplib_srq *)q_handle;
+ srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
+ >> NQ_CN_TOGGLE_SFT;
+ srq->dbinfo.toggle = srq->toggle;
+ srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
+ if (srq_p->uctx_srq_page)
+ *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
bnxt_qplib_armen_db(&srq->dbinfo,
DBC_DBC_TYPE_SRQ_ARMENA);
- if (!nq->srqn_handler(nq,
- (struct bnxt_qplib_srq *)q_handle,
- nqsrqe->event))
- num_srqne_processed++;
- else
+ if (nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
dev_warn(&nq->pdev->dev,
"SRQ event 0x%x not handled\n",
nqsrqe->event);
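
Both cases above also publish the hardware toggle to user space: the NQ handler stores it in the per-CQ/per-SRQ context page, so a user-mode provider can validate completions across wrap-around without a system call. A sketch of the consumer side (illustrative only; the real reader lives in the user-space provider library):

/* Userspace sketch: the provider mmap()s the context page; the kernel
 * NQ handler above stores the latest toggle at offset 0.
 */
static inline __u32 bnxt_re_read_toggle(const volatile __u32 *uctx_page)
{
	return *uctx_page;
}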
@@ -378,15 +387,33 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
"nqe with type = 0x%x not handled\n", type);
break;
}
- raw_cons++;
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
}
- if (hwq->cons != raw_cons) {
- hwq->cons = raw_cons;
+ if (hw_polled)
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
- }
spin_unlock_bh(&hwq->lock);
}
+/* bnxt_re_synchronize_nq - self-poll the notification queue.
+ * @nq - notification queue pointer
+ *
+ * Polls the given notification queue until all pending entries have
+ * been processed.
+ * Useful for synchronizing against outstanding notification entries
+ * while resources are going away.
+ */
+
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
+{
+ int budget = nq->budget;
+
+ nq->budget = nq->hwq.max_elements;
+ bnxt_qplib_service_nq(&nq->nq_tasklet);
+ nq->budget = budget;
+}
+
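
Swapping the budget up to hwq.max_elements lets one synchronous call drain every outstanding entry. A plausible call site during teardown (the loop bounds and field names here are assumptions, not taken from this patch):

/* Sketch: drain every NQ once so no stale CQ/SRQ notification can
 * reference an object that is about to be freed.
 */
for (i = 0; i < num_nqs; i++)
	bnxt_re_synchronize_nq(&rdev->nq[i]);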
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_nq *nq = dev_instance;
@@ -405,18 +432,22 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
- tasklet_disable(&nq->nq_tasklet);
+ if (!nq->requested)
+ return;
+
+ nq->requested = false;
/* Mask h/w interrupt */
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
/* Sync with last running IRQ handler */
synchronize_irq(nq->msix_vec);
+ irq_set_affinity_hint(nq->msix_vec, NULL);
+ free_irq(nq->msix_vec, nq);
+ kfree(nq->name);
+ nq->name = NULL;
+
if (kill)
tasklet_kill(&nq->nq_tasklet);
- if (nq->requested) {
- irq_set_affinity_hint(nq->msix_vec, NULL);
- free_irq(nq->msix_vec, nq);
- nq->requested = false;
- }
+ tasklet_disable(&nq->nq_tasklet);
}
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
@@ -442,6 +473,7 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
int msix_vector, bool need_init)
{
+ struct bnxt_qplib_res *res = nq->res;
int rc;
if (nq->requested)
@@ -453,10 +485,17 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
else
tasklet_enable(&nq->nq_tasklet);
- snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+ nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
+ nq_indx, pci_name(res->pdev));
+ if (!nq->name)
+ return -ENOMEM;
rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
- if (rc)
+ if (rc) {
+ kfree(nq->name);
+ nq->name = NULL;
+ tasklet_disable(&nq->nq_tasklet);
return rc;
+ }
cpumask_clear(&nq->mask);
cpumask_set_cpu(nq_indx, &nq->mask);
@@ -467,7 +506,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
nq->msix_vec, nq_indx);
}
nq->requested = true;
- bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
return rc;
}
@@ -477,18 +516,17 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
resource_size_t reg_base;
struct bnxt_qplib_nq_db *nq_db;
struct pci_dev *pdev;
- int rc = 0;
pdev = nq->pdev;
nq_db = &nq->nq_db;
+ nq_db->dbinfo.flags = 0;
nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
if (!nq_db->reg.bar_base) {
dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
nq_db->reg.bar_id);
- rc = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
reg_base = nq_db->reg.bar_base + reg_offt;
@@ -498,15 +536,14 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
if (!nq_db->reg.bar_reg) {
dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
nq_db->reg.bar_id);
- rc = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
nq_db->dbinfo.db = nq_db->reg.bar_reg;
nq_db->dbinfo.hwq = &nq->hwq;
nq_db->dbinfo.xid = nq->ring_id;
-fail:
- return rc;
+
+ return 0;
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
@@ -514,11 +551,12 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
cqn_handler_t cqn_handler,
srqn_handler_t srqn_handler)
{
- int rc = -1;
+ int rc;
nq->pdev = pdev;
nq->cqn_handler = cqn_handler;
nq->srqn_handler = srqn_handler;
+ nq->load = 0;
/* Have a task to schedule CQ notifiers in post send case */
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
@@ -581,18 +619,20 @@ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_destroy_srq req;
- struct creq_destroy_srq_resp resp;
- u16 cmd_flags = 0;
+ struct creq_destroy_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_srq req = {};
int rc;
- RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_SRQ,
+ sizeof(req));
/* Configure the request */
req.srq_cid = cpu_to_le32(srq->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
- (struct creq_base *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
kfree(srq->swq);
if (rc)
return;
@@ -604,10 +644,10 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
- struct creq_create_srq_resp resp;
- struct cmdq_create_srq req;
+ struct creq_create_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_srq req = {};
struct bnxt_qplib_pbl *pbl;
- u16 cmd_flags = 0;
u16 pg_sz_lvl;
int rc, idx;
@@ -618,16 +658,11 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
- goto exit;
-
- srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
- GFP_KERNEL);
- if (!srq->swq) {
- rc = -ENOMEM;
- goto fail;
- }
-
- RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
+ return rc;
+ srq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_SRQ,
+ sizeof(req));
/* Configure the request */
req.dpi = cpu_to_le32(srq->dpi->dpi);
@@ -644,82 +679,73 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
req.pd_id = cpu_to_le32(srq->pd->id);
req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto fail;
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
- for (idx = 0; idx < srq->hwq.max_elements; idx++)
- srq->swq[idx].next_idx = idx + 1;
- srq->swq[srq->last_idx].next_idx = -1;
+ if (!srq->hwq.is_user) {
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+ }
srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
srq->dbinfo.xid = srq->id;
srq->dbinfo.db = srq->dpi->dbr;
srq->dbinfo.max_slot = 1;
- srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
- if (srq->threshold)
- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
- srq->arm_req = false;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
return 0;
fail:
bnxt_qplib_free_hwq(res, &srq->hwq);
kfree(srq->swq);
-exit:
- return rc;
-}
-
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
- struct bnxt_qplib_srq *srq)
-{
- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- u32 sw_prod, sw_cons, count = 0;
-
- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
-
- count = sw_prod > sw_cons ? sw_prod - sw_cons :
- srq_hwq->max_elements - sw_cons + sw_prod;
- if (count > srq->threshold) {
- srq->arm_req = false;
- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
- } else {
- /* Deferred arming */
- srq->arm_req = true;
- }
- return 0;
+ return rc;
}
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_query_srq req;
- struct creq_query_srq_resp resp;
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct creq_query_srq_resp_sb *sb;
- u16 cmd_flags = 0;
- int rc = 0;
+ struct cmdq_query_srq req = {};
+ int rc;
- RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_SRQ,
+ sizeof(req));
/* Configure the request */
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.srq_cid = cpu_to_le32(srq->id);
- sb = sbuf->sb;
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
- (void *)sbuf, 0);
- srq->threshold = le16_to_cpu(sb->srq_limit);
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ sb = sbuf.sb;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
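
All commands in this patch now travel through a bnxt_qplib_cmdqmsg descriptor instead of loose pointer arguments to send_message(). The fill helper is presumably just a packer along these lines (a sketch; the real definition is expected in qplib_rcfw.h):

static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
					   void *req, void *resp, void *sb,
					   u32 req_size, u32 res_size,
					   u8 block)
{
	msg->req = req;		/* command buffer */
	msg->resp = resp;	/* completion (CREQ) buffer */
	msg->sb = sb;		/* optional DMA side buffer */
	msg->req_size = req_size;
	msg->res_size = res_size;
	msg->block = block;	/* block waiting for completion */
}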
@@ -730,23 +756,20 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
struct rq_wqe *srqe;
struct sq_sge *hw_sge;
- u32 sw_prod, sw_cons, count = 0;
- int i, rc = 0, next;
+ int i, next;
spin_lock(&srq_hwq->lock);
if (srq->start_idx == srq->last_idx) {
dev_err(&srq_hwq->pdev->dev,
"FP: SRQ (0x%x) is full!\n", srq->id);
- rc = -EINVAL;
spin_unlock(&srq_hwq->lock);
- goto done;
+ return -EINVAL;
}
next = srq->start_idx;
srq->start_idx = srq->swq[next].next_idx;
spin_unlock(&srq_hwq->lock);
- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
+ srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
memset(srqe, 0, srq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
@@ -762,68 +785,53 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
srqe->wr_id[0] = cpu_to_le32((u32)next);
srq->swq[next].wr_id = wqe->wr_id;
- srq_hwq->prod++;
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
- spin_lock(&srq_hwq->lock);
- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- /* retaining srq_hwq->cons for this logic
- * actually the lock is only required to
- * read srq_hwq->cons.
- */
- sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
- count = sw_prod > sw_cons ? sw_prod - sw_cons :
- srq_hwq->max_elements - sw_cons + sw_prod;
- spin_unlock(&srq_hwq->lock);
/* Ring DB */
bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
- if (srq->arm_req == true && count > srq->threshold) {
- srq->arm_req = false;
- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
- }
-done:
- return rc;
+
+ return 0;
}
/* QP */
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
- int rc = 0;
int indx;
- que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
- if (!que->swq) {
- rc = -ENOMEM;
- goto out;
- }
+ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
+ if (!que->swq)
+ return -ENOMEM;
que->swq_start = 0;
- que->swq_last = que->max_wqe - 1;
- for (indx = 0; indx < que->max_wqe; indx++)
+ que->swq_last = que->max_sw_wqe - 1;
+ for (indx = 0; indx < que->max_sw_wqe; indx++)
que->swq[indx].next_idx = indx + 1;
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
que->swq_last = 0;
-out:
- return rc;
+
+ return 0;
}
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_qp1_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- struct creq_create_qp1_resp resp;
- struct cmdq_create_qp1 req;
+ struct cmdq_create_qp1 req = {};
struct bnxt_qplib_pbl *pbl;
- u16 cmd_flags = 0;
u32 qp_flags = 0;
u8 pg_sz_lvl;
u32 tbl_indx;
int rc;
- RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
-
+ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP1,
+ sizeof(req));
/* General */
req.type = qp->type;
req.dpi = cpu_to_le32(qp->dpi->dpi);
@@ -833,11 +841,11 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
- goto exit;
+ return rc;
rc = bnxt_qplib_alloc_init_swq(sq);
if (rc)
@@ -857,10 +865,11 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
+ rq->dbinfo.flags = 0;
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
@@ -891,8 +900,8 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.qp_flags = cpu_to_le32(qp_flags);
req.pd_id = cpu_to_le32(qp->pd->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto fail;
@@ -925,7 +934,6 @@ sq_swq:
kfree(sq->swq);
fail_sq:
bnxt_qplib_free_hwq(res, &sq->hwq);
-exit:
return rc;
}
@@ -952,20 +960,24 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
+ struct creq_create_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- struct creq_create_qp_resp resp;
+ struct cmdq_create_qp req = {};
int rc, req_size, psn_sz = 0;
struct bnxt_qplib_hwq *xrrq;
struct bnxt_qplib_pbl *pbl;
- struct cmdq_create_qp req;
- u16 cmd_flags = 0;
u32 qp_flags = 0;
u8 pg_sz_lvl;
u32 tbl_indx;
u16 nsge;
- RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
+ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP,
+ sizeof(req));
/* General */
req.type = qp->type;
@@ -974,29 +986,48 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* SQ */
if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
- psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
+
+ if (qp->is_host_msn_tbl) {
+ psn_sz = sizeof(struct sq_msn_search);
+ qp->msn = 0;
+ }
}
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
hwq_attr.aux_stride = psn_sz;
- hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
+ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+ : 0;
+ /* Update msn tbl size */
+ if (qp->is_host_msn_tbl && psn_sz) {
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ else
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
+ qp->msn_tbl_sz = hwq_attr.aux_depth;
+ qp->msn = 0;
+ }
+
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
- goto exit;
-
- rc = bnxt_qplib_alloc_init_swq(sq);
- if (rc)
- goto fail_sq;
+ return rc;
- if (psn_sz)
- bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ if (!sq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto fail_sq;
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ }
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
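
Condensing the MSN-table sizing rule above into one helper makes the arithmetic easier to see; for example sq_size = 800 gives 1024 entries in static WQE mode and 512 in variable mode. The helper name is hypothetical:

static u32 bnxt_re_msn_tbl_depth(u32 sq_size, u8 wqe_mode)	/* hypothetical */
{
	u32 depth = roundup_pow_of_two(sq_size);

	return wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC ? depth : depth / 2;
}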
@@ -1011,19 +1042,22 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (!qp->srq) {
+ rq->dbinfo.flags = 0;
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
- rc = bnxt_qplib_alloc_init_swq(rq);
- if (rc)
- goto fail_rq;
+ if (!rq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto fail_rq;
+ }
req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1051,7 +1085,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
@@ -1098,8 +1132,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
req.pd_id = cpu_to_le32(qp->pd->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto fail;
@@ -1118,9 +1153,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return 0;
fail:
@@ -1135,7 +1172,6 @@ sq_swq:
kfree(sq->swq);
fail_sq:
bnxt_qplib_free_hwq(res, &sq->hwq);
-exit:
return rc;
}
@@ -1153,8 +1189,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;
@@ -1228,20 +1262,73 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
}
}
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp,
+ struct cmdq_modify_qp *req)
+{
+ u32 mandatory_flags = 0;
+
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
+
+ if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
+ req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ }
+
+ if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ }
+
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
+ qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+
+ qp->modify_flags |= mandatory_flags;
+ req->qp_type = qp->type;
+}
+
+static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
+{
+ if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
+ return true;
+
+ return false;
+}
+
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_modify_qp req;
- struct creq_modify_qp_resp resp;
- u16 cmd_flags = 0;
+ struct creq_modify_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_modify_qp req = {};
+ u16 vlan_pcp_vlan_dei_vlan_id;
u32 temp32[4];
u32 bmask;
int rc;
- RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_MODIFY_QP,
+ sizeof(req));
/* Filter out the qp_attr_mask based on the state->new transition */
__filter_modify_flags(qp);
+ if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
+ /* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
+ if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
+ is_optimized_state_transition(qp))
+ bnxt_set_mandatory_attributes(res, qp, &req);
+ }
bmask = qp->modify_flags;
req.modify_mask = cpu_to_le32(qp->modify_flags);
req.qp_cid = cpu_to_le32(qp->id);
@@ -1272,9 +1359,14 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
req.flow_label = cpu_to_le32(qp->ah.flow_label);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
- req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
- [qp->ah.sgid_index]);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+ else
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+ }
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
req.hop_limit = qp->ah.hop_limit;
@@ -1286,7 +1378,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
memcpy(req.dest_mac, qp->ah.dmac, 6);
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
- req.path_mtu = qp->path_mtu;
+ req.path_mtu_pingpong_push_enable |= qp->path_mtu;
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
req.timeout = qp->timeout;
@@ -1322,10 +1414,19 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
- req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
+ vlan_pcp_vlan_dei_vlan_id =
+ ((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
+ CMDQ_MODIFY_QP_VLAN_ID_SFT) &
+ CMDQ_MODIFY_QP_VLAN_ID_MASK);
+ vlan_pcp_vlan_dei_vlan_id |=
+ ((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
+ CMDQ_MODIFY_QP_VLAN_PCP_MASK);
+ req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
+ }
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
return rc;
qp->cur_qp_state = qp->state;
@@ -1335,36 +1436,41 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_query_qp req;
- struct creq_query_qp_resp resp;
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct creq_query_qp_resp_sb *sb;
- u16 cmd_flags = 0;
+ struct cmdq_query_qp req = {};
u32 temp32[4];
- int i, rc = 0;
-
- RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
+ int i, rc;
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- sb = sbuf->sb;
+ sb = sbuf.sb;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_QP,
+ sizeof(req));
req.qp_cid = cpu_to_le32(qp->id);
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
- (void *)sbuf, 0);
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto bail;
/* Extract the context from the side buffer */
qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK;
qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
- CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
- true : false;
+ CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
qp->access = sb->access;
qp->pkey_index = le16_to_cpu(sb->pkey);
qp->qkey = le32_to_cpu(sb->qkey);
+ qp->udp_sport = le16_to_cpu(sb->udp_src_port);
temp32[0] = le32_to_cpu(sb->dgid[0]);
temp32[1] = le32_to_cpu(sb->dgid[1]);
@@ -1410,20 +1516,25 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+ qp->port_id = le16_to_cpu(sb->port_id);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
+ u32 peek_flags, peek_cons;
struct cq_base *hw_cqe;
int i;
+ peek_flags = cq->dbinfo.flags;
+ peek_cons = cq_hwq->cons;
for (i = 0; i < cq_hwq->max_elements; i++) {
- hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
- if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
+ if (!CQE_CMP_VALID(hw_cqe, peek_flags))
continue;
/*
* The valid test of the entry must be done first before
@@ -1453,6 +1564,8 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
default:
break;
}
+ bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
+ 1, &peek_flags);
}
}
@@ -1460,24 +1573,31 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_destroy_qp req;
- struct creq_destroy_qp_resp resp;
- u16 cmd_flags = 0;
+ struct creq_destroy_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_qp req = {};
u32 tbl_indx;
int rc;
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
+ spin_unlock_bh(&rcfw->tbl_lock);
- RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_QP,
+ sizeof(req));
req.qp_cid = cpu_to_le32(qp->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc) {
+ spin_lock_bh(&rcfw->tbl_lock);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return rc;
}
@@ -1551,6 +1671,27 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
return NULL;
}
+/* Fill the MSN table entry in the next psn row */
+static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_msn_search *msns;
+ u32 start_psn, next_psn;
+ u16 start_idx;
+
+ msns = (struct sq_msn_search *)swq->psn_search;
+ msns->start_idx_next_psn_start_psn = 0;
+
+ start_psn = swq->start_psn;
+ next_psn = swq->next_psn;
+ start_idx = swq->slot_idx;
+ msns->start_idx_next_psn_start_psn |=
+ bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
+ qp->msn++;
+ qp->msn %= qp->msn_tbl_sz;
+}
+
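
bnxt_re_update_msn_tbl() packs the three values into the single little-endian quadword the hardware walks. A sketch of the packing, assuming the sq_msn_search layout of a 16-bit start index and 24-bit PSNs (treat the shifts as assumptions):

/* bits 63..48: start_idx, 47..24: next_psn, 23..0: start_psn (assumed) */
static inline __le64 pack_msn_entry(u16 st_idx, u32 next_psn, u32 start_psn)
{
	return cpu_to_le64(((u64)st_idx << 48) |
			   (((u64)next_psn & 0xffffff) << 24) |
			   ((u64)start_psn & 0xffffff));
}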
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe,
struct bnxt_qplib_swq *swq)
@@ -1562,6 +1703,12 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
if (!swq->psn_search)
return;
+ /* Use the MSN table instead of the PSN search when the cap flag is set */
+ if (qp->is_host_msn_tbl) {
+ bnxt_qplib_fill_msn_search(qp, wqe, swq);
+ return;
+ }
+ psns = (struct sq_psn_search *)swq->psn_search;
psns = swq->psn_search;
psns_ext = swq->psn_ext;
@@ -1572,7 +1719,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
SQ_PSN_SEARCH_NEXT_PSN_MASK);
- if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+ if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
@@ -1582,9 +1729,9 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
}
}
-static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_swqe *wqe,
- u16 *idx)
+static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ u32 *idx)
{
struct bnxt_qplib_hwq *hwq;
int len, t_len, offt;
@@ -1601,7 +1748,7 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
il_src = (void *)wqe->sg_list[indx].addr;
t_len += len;
if (t_len > qp->max_inline_data)
- goto bad;
+ return BNXT_RE_INVAL_MSG_SIZE;
while (len) {
if (pull_dst) {
pull_dst = false;
@@ -1625,13 +1772,11 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
}
return t_len;
-bad:
- return -ENOMEM;
}
-static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sge *ssge,
- u16 nsge, u16 *idx)
+static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_sge *ssge,
+ u32 nsge, u32 *idx)
{
struct sq_sge *dsge;
int indx, len = 0;
@@ -1672,8 +1817,8 @@ static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
return slot;
}
-static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
- struct bnxt_qplib_swq *swq)
+static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
+ struct bnxt_qplib_swq *swq, bool hw_retx)
{
struct bnxt_qplib_hwq *hwq;
u32 pg_num, pg_indx;
@@ -1684,6 +1829,11 @@ static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
if (!hwq->pad_pg)
return;
tail = swq->slot_idx / sq->dbinfo.max_slot;
+ if (hw_retx) {
+ /* For HW retx use qp msn index */
+ tail = qp->msn;
+ tail %= qp->msn_tbl_sz;
+ }
pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
@@ -1707,13 +1857,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
+ u32 wqe_idx, slots, idx;
u16 wqe_sz, qdf = 0;
+ bool msn_update;
void *base_hdr;
void *ext_hdr;
__le32 temp32;
- u32 wqe_idx;
- u32 slots;
- u16 idx;
hwq = &sq->hwq;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
@@ -1735,7 +1884,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
- bnxt_qplib_pull_psn_buff(sq, swq);
+ bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
idx = 0;
swq->slot_idx = hwq->prod;
@@ -1765,8 +1914,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
else
data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
&idx);
- if (data_len < 0)
- goto queue_err;
+ if (data_len > BNXT_RE_MAX_MSG_SIZE) {
+ rc = -EINVAL;
+ goto done;
+ }
+ /* Make sure we update the MSN table only for WQEs that go out on the wire */
+ msn_update = true;
/* Specifics */
switch (wqe->type) {
case BNXT_QPLIB_SWQE_TYPE_SEND:
@@ -1807,6 +1960,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
SQ_SEND_DST_QP_MASK);
ext_sqe->avid = cpu_to_le32(wqe->send.avid &
SQ_SEND_AVID_MASK);
+ msn_update = false;
} else {
sqe->length = cpu_to_le32(data_len);
if (qp->mtu)
@@ -1864,7 +2018,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sqe->wqe_type = wqe->type;
sqe->flags = wqe->flags;
sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
-
+ msn_update = false;
break;
}
case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
@@ -1896,6 +2050,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
PTU_PTE_VALID);
ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
ext_sqe->va = cpu_to_le64(wqe->frmr.va);
+ msn_update = false;
break;
}
@@ -1913,6 +2068,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sqe->l_key = cpu_to_le32(wqe->bind.r_key);
ext_sqe->va = cpu_to_le64(wqe->bind.va);
ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
+ msn_update = false;
break;
}
default:
@@ -1920,11 +2076,13 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
- swq->next_psn = sq->psn & BTH_PSN_MASK;
- bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ if (!qp->is_host_msn_tbl || msn_update) {
+ swq->next_psn = sq->psn & BTH_PSN_MASK;
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ }
queue_err:
bnxt_qplib_swq_mod_start(sq, wqe_idx);
- bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
+ bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
qp->wqe_cnt++;
done:
if (sch_handler) {
@@ -1960,8 +2118,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
- u16 wqe_sz, idx;
- u32 wqe_idx;
+ u32 wqe_idx, idx;
+ u16 wqe_sz;
int rc = 0;
hwq = &rq->hwq;
@@ -2012,7 +2170,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
bnxt_qplib_swq_mod_start(rq, wqe_idx);
- bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
+ bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
if (sch_handler) {
nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
@@ -2036,13 +2194,21 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
- struct creq_create_cq_resp resp;
+ struct creq_create_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_cq req = {};
struct bnxt_qplib_pbl *pbl;
- struct cmdq_create_cq req;
- u16 cmd_flags = 0;
+ u32 coalescing = 0;
u32 pg_sz_lvl;
int rc;
+ if (!cq->dpi) {
+ dev_err(&rcfw->pdev->dev,
+ "FP: CREATE_CQ failed due to NULL DPI\n");
+ return -EINVAL;
+ }
+
+ cq->dbinfo.flags = 0;
hwq_attr.res = res;
hwq_attr.depth = cq->max_wqe;
hwq_attr.stride = sizeof(struct cq_base);
@@ -2050,18 +2216,35 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
hwq_attr.sginfo = &cq->sg_info;
rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
if (rc)
- goto exit;
+ return rc;
- RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_CQ,
+ sizeof(req));
- if (!cq->dpi) {
- dev_err(&rcfw->pdev->dev,
- "FP: CREATE_CQ failed due to NULL DPI\n");
- return -EINVAL;
- }
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
- req.cq_size = cpu_to_le32(cq->hwq.max_elements);
+ req.cq_size = cpu_to_le32(cq->max_wqe);
+
+ if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2) &&
+ cq->coalescing->enable) {
+ req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
+ coalescing |= ((cq->coalescing->buf_maxtime <<
+ CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
+ CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
+ coalescing |= ((cq->coalescing->normal_maxbuf <<
+ CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
+ CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
+ coalescing |= ((cq->coalescing->during_maxbuf <<
+ CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
+ CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
+ if (cq->coalescing->en_ring_idle_mode)
+ coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
+ else
+ coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
+ req.coalescing = cpu_to_le32(coalescing);
+ }
+
pbl = &cq->hwq.pbl[PBL_LVL_0];
pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
CMDQ_CREATE_CQ_PG_SIZE_SFT);
@@ -2071,9 +2254,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
req.cq_fco_cnq_id = cpu_to_le32(
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);
-
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto fail;
@@ -2088,7 +2271,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq->dbinfo.hwq = &cq->hwq;
cq->dbinfo.xid = cq->id;
cq->dbinfo.db = cq->dpi->dbr;
- cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
+ cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ cq->dbinfo.flags = 0;
+ cq->dbinfo.toggle = 0;
bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
@@ -2096,24 +2281,75 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
fail:
bnxt_qplib_free_hwq(res, &cq->hwq);
-exit:
+ return rc;
+}
+
+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cq *cq)
+{
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
+ /* Reset only the cons bit in the flags */
+ cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
+}
+
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+ int new_cqes)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_resize_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_resize_cq req = {};
+ struct bnxt_qplib_pbl *pbl;
+ u32 pg_sz, lvl, new_sz;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_RESIZE_CQ,
+ sizeof(req));
+ hwq_attr.sginfo = &cq->sg_info;
+ hwq_attr.res = res;
+ hwq_attr.depth = new_cqes;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
+ if (rc)
+ return rc;
+
+ req.cq_cid = cpu_to_le32(cq->id);
+ pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
+ pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
+ lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
+ CMDQ_RESIZE_CQ_LVL_MASK;
+ new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
+ CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
+ req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
+ req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
return rc;
}
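
Resize is two-phase: bnxt_qplib_resize_cq() allocates the replacement ring and issues the firmware command, and bnxt_qplib_resize_cq_complete() swaps the hwq in once the old CQ has been drained. A caller sketch (error handling elided; the drain step depends on the CQ-cutoff event and is only indicated):

rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
if (rc)
	return rc;
/* ... wait for the old CQ to report cutoff, polling out stale CQEs ... */
bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);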
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct cmdq_destroy_cq req;
- struct creq_destroy_cq_resp resp;
+ struct creq_destroy_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_cq req = {};
u16 total_cnq_events;
- u16 cmd_flags = 0;
int rc;
- RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DESTROY_CQ,
+ sizeof(req));
req.cq_cid = cpu_to_le32(cq->id);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
return rc;
total_cnq_events = le16_to_cpu(resp.total_cnq_events);
@@ -2151,7 +2387,8 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
cqe++;
(*budget)--;
skip_compl:
- bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ sq->swq[last].slots, &sq->dbinfo.flags);
sq->swq_last = sq->swq[last].next_idx;
}
*pcqe = cqe;
@@ -2198,7 +2435,8 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
cqe->wr_id = rq->swq[last].wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ rq->swq[last].slots, &rq->dbinfo.flags);
rq->swq_last = rq->swq[last].next_idx;
}
*pcqe = cqe;
@@ -2227,7 +2465,7 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
- u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+ u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
struct bnxt_qplib_q *sq = &qp->sq;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
@@ -2258,16 +2496,14 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
}
if (sq->condition) {
/* Peek at the completions */
- peek_raw_cq_cons = cq->hwq.cons;
+ peek_flags = cq->dbinfo.flags;
peek_sw_cq_cons = cq_cons;
i = cq->hwq.max_elements;
while (i--) {
- peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
peek_sw_cq_cons, NULL);
/* If the next hwcqe is VALID */
- if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
- cq->hwq.max_elements)) {
+ if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
/*
* The valid test of the entry must be done first before
* reading any further.
@@ -2310,8 +2546,9 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
rc = -EINVAL;
goto out;
}
- peek_sw_cq_cons++;
- peek_raw_cq_cons++;
+ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
+ &peek_sw_cq_cons,
+ 1, &peek_flags);
}
dev_err(&cq->hwq.pdev->dev,
"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
@@ -2322,6 +2559,32 @@ out:
return rc;
}
+static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ int cqe_sq_cons = -1;
+ u32 start, last;
+
+ sq_hwq = &sq->hwq;
+
+ start = sq->swq_start;
+ last = sq->swq_last;
+
+ while (last != start) {
+ swq = &sq->swq[last];
+ if (swq->slot_idx == cqe_slot) {
+ cqe_sq_cons = swq->next_idx;
+ dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
+ __func__, cqe_sq_cons, cqe_slot);
+ break;
+ }
+
+ last = swq->next_idx;
+ }
+ return cqe_sq_cons;
+}
+
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget,
@@ -2329,9 +2592,10 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_cqe *cqe;
+ u32 cqe_sq_cons, slot_num;
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
- u32 cqe_sq_cons;
+ int cqe_cons;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2343,12 +2607,26 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
- cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
+
+ if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
+ slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
+ cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
+ if (cqe_cons < 0) {
+ dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
+ __func__, slot_num);
+ goto done;
+ }
+ cqe_sq_cons = cqe_cons;
+ dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
+ __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
+ }
+
/* We need to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -2386,10 +2664,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;
@@ -2398,7 +2678,8 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
}
skip:
- bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ swq->slots, &sq->dbinfo.flags);
sq->swq_last = swq->next_idx;
if (sq->single)
break;
@@ -2425,7 +2706,8 @@ static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
srq->swq[srq->last_idx].next_idx = (int)tag;
srq->last_idx = (int)tag;
srq->swq[srq->last_idx].next_idx = -1;
- srq->hwq.cons++; /* Support for SRQE counter */
+ bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
+ srq->dbinfo.max_slot, &srq->dbinfo.flags);
spin_unlock(&srq->hwq.lock);
}
@@ -2439,7 +2721,6 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
u32 wr_id_idx;
- int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
@@ -2450,7 +2731,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
- goto done;
+ return 0;
}
cqe = *pcqe;
@@ -2495,7 +2776,8 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
cqe->wr_id = swq->wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
rq->swq_last = swq->next_idx;
*pcqe = cqe;
@@ -2506,8 +2788,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
}
}
-done:
- return rc;
+ return 0;
}
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
@@ -2520,7 +2801,6 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
u32 wr_id_idx;
- int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
@@ -2531,7 +2811,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
- goto done;
+ return 0;
}
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
@@ -2583,7 +2863,8 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
cqe->wr_id = swq->wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
rq->swq_last = swq->next_idx;
*pcqe = cqe;
@@ -2593,21 +2874,18 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
}
}
-done:
- return rc;
+
+ return 0;
}
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
struct cq_base *hw_cqe;
- u32 sw_cons, raw_cons;
bool rc = true;
- raw_cons = cq->hwq.cons;
- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
/* Check for Valid bit. If the CQE is valid, return false */
- rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+ rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
return rc;
}
@@ -2621,7 +2899,6 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
- int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
@@ -2632,7 +2909,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
- goto done;
+ return 0;
}
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
@@ -2690,7 +2967,8 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->wr_id = swq->wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
rq->swq_last = swq->next_idx;
*pcqe = cqe;
@@ -2701,8 +2979,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
}
}
-done:
- return rc;
+ return 0;
}
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
@@ -2724,11 +3001,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
- if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "FP: CQ Process terminal qp is NULL\n");
+ if (!qp)
return -EINVAL;
- }
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
@@ -2739,7 +3013,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
if (cqe_cons == 0xFFFF)
goto do_rq;
- cqe_cons %= sq->max_wqe;
+ cqe_cons %= sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
@@ -2767,7 +3041,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe++;
(*budget)--;
}
- bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ sq->swq[swq_last].slots, &sq->dbinfo.flags);
sq->swq_last = sq->swq[swq_last].next_idx;
}
*pcqe = cqe;
@@ -2852,19 +3127,17 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe;
- u32 sw_cons, raw_cons;
int budget, rc = 0;
+ u32 hw_polled = 0;
u8 type;
- raw_cons = cq->hwq.cons;
budget = num_cqes;
while (budget) {
- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
/* Check for Valid bit */
- if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
+ if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
break;
/*
@@ -2879,7 +3152,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
&cqe, &budget,
- sw_cons, lib_qp);
+ cq->hwq.cons, lib_qp);
break;
case CQ_BASE_CQE_TYPE_RES_RC:
rc = bnxt_qplib_cq_process_res_rc(cq,
@@ -2925,18 +3198,20 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
dev_err(&cq->hwq.pdev->dev,
"process_cqe error rc = 0x%x\n", rc);
}
- raw_cons++;
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
+ 1, &cq->dbinfo.flags);
+
}
- if (cq->hwq.cons != raw_cons) {
- cq->hwq.cons = raw_cons;
+ if (hw_polled)
bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
- }
exit:
return num_cqes - budget;
}
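
A typical poller calls this in batches and treats a fully used budget as a hint that more completions may be pending (a sketch; the CQE-to-ib_wc translation is driver-specific and only indicated):

do {
	ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqes, budget, &lib_qp);
	for (i = 0; i < ncqe; i++)
		; /* translate cqes[i] into an ib_wc entry here */
} while (ncqe == budget);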
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
+ cq->dbinfo.toggle = cq->toggle;
if (arm_type)
bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
/* Using cq->arm_state variable to track whether to issue cq handler */