Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c')
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 173
1 file changed, 149 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index c0e165dfc403..e2a33e46b48a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -1,13 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Admin Function driver
+/* Marvell RVU Admin Function driver
*
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -45,15 +42,24 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NPA_AQ_COMP_GOOD)
+ if (result->compcode != NPA_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+ result->compcode == NPA_AQ_COMP_LOCKERR ||
+ result->compcode == NPA_AQ_COMP_CTX_POISON) {
+ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
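
The compcode check above singles out the three completion codes that can leave an NDC cacheline locked but invalid, which is why they funnel into rvu_ndc_fix_locked_cacheline(). A minimal sketch of the same classification as a predicate (the helper name is hypothetical, not part of this patch):

    /* Hypothetical helper, illustration only: AQ completion codes
     * that may leave an NDC cacheline locked-but-invalid.
     */
    static bool npa_aq_comp_is_ndc_fault(u8 compcode)
    {
            return compcode == NPA_AQ_COMP_CTX_FAULT ||
                   compcode == NPA_AQ_COMP_LOCKERR ||
                   compcode == NPA_AQ_COMP_CTX_POISON;
    }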
-static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
- struct npa_aq_enq_rsp *rsp)
+int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -94,6 +100,11 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* Hardware uses the same aq->res->base for updating the result of
+ * the previous instruction, hence wait here until it is done.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +149,10 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
break;
}
- if (rc)
+ if (rc) {
+ spin_unlock(&aq->lock);
return rc;
-
- spin_lock(&aq->lock);
+ }
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
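
Taking aq->lock before the memset (instead of only around submission, as before) serializes all use of the shared aq->res->base result buffer; the cost is that every later exit path must drop the lock, which is what the new rc error path does. The critical-section shape after this patch, condensed (a sketch, not a verbatim excerpt):

    spin_lock(&aq->lock);            /* owns aq->res->base from here on */
    memset(aq->res->base, 0, aq->res->entry_sz);
    /* ... copy context and write-mask to RES_ADDR + 128/256 ... */
    if (rc) {
            spin_unlock(&aq->lock);  /* error path must unlock too */
            return rc;
    }
    rc = npa_aq_enqueue_wait(rvu, block, &inst);
    /* ... copy out the result, then drop the lock ... */
    spin_unlock(&aq->lock);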
@@ -218,6 +229,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
aq_req.aura.ena = 0;
aq_req.aura_mask.ena = 1;
+ aq_req.aura.bp_ena = 0;
+ aq_req.aura_mask.bp_ena = 1;
cnt = pfvf->aura_ctx->qsize;
bmap = pfvf->aura_bmap;
}
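
The new bp_ena lines follow the AQ write-mask idiom used throughout this function: for a WRITE op, hardware updates only those context fields whose bits are set in the companion *_mask structure. For example, updating a single field of a pool context would look like this (buf_size is used here only to illustrate the idiom; treat the field name as hypothetical):

    /* Only the masked field is written; every other field of
     * aq_req.pool is ignored because its mask bits stay zero.
     */
    aq_req.pool.buf_size = buf_size;       /* hypothetical field */
    aq_req.pool_mask.buf_size = ~(u64)0;   /* "write this field" */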
@@ -241,12 +254,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+ struct npa_aq_enq_req lock_ctx_req;
+ int err;
+
+ if (req->op != NPA_AQ_INSTOP_INIT)
+ return 0;
+
+ memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+ lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+ lock_ctx_req.ctype = req->ctype;
+ lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+ lock_ctx_req.aura_id = req->aura_id;
+ err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+ if (err)
+ dev_err(rvu->dev,
+ "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+ req->hdr.pcifunc,
+ (req->ctype == NPA_AQ_CTYPE_AURA) ?
+ "Aura" : "Pool", req->aura_id);
+ return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+ struct npa_aq_enq_req *req,
+ struct npa_aq_enq_rsp *rsp)
+{
+ int err;
+
+ err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+ if (!err)
+ err = npa_lf_hwctx_lockdown(rvu, req);
+ return err;
+}
+#else
+
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
+#endif
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -289,6 +340,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
return NPA_AF_ERR_PARAM;
+ if (req->way_mask)
+ req->way_mask &= 0xFFFF;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
@@ -345,7 +399,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
/* Clear way partition mask and set aura offset to '0' */
cfg &= ~(BIT_ULL(34) - 1);
/* Set aura size & enable caching of contexts */
- cfg |= (req->aura_sz << 16) | BIT_ULL(34);
+ cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
+
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
/* Configure aura HW context's base */
@@ -353,7 +408,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
(u64)pfvf->aura_ctx->iova);
/* Enable caching of qints hw context */
- rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
+ rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
+ BIT_ULL(36) | req->way_mask << 20);
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
(u64)pfvf->npa_qints_ctx->iova);
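
Read from the two writes above, the 16-bit way_mask lands in bits [15:0] of NPA_AF_LFX_AURAS_CFG (OR'd in unshifted) and in bits [35:20] of NPA_AF_LFX_QINTS_CFG (shifted left by 20, just below the caching-enable bit 36). The same placement spelled out with FIELD_PREP from the newly included <linux/bitfield.h>; the two *_WAY_MASK macros are hypothetical names for the fields implied by those shifts:

    #define AURAS_CFG_WAY_MASK  GENMASK_ULL(15, 0)   /* assumption */
    #define QINTS_CFG_WAY_MASK  GENMASK_ULL(35, 20)  /* assumption */

    aura_cfg |= FIELD_PREP(AURAS_CFG_WAY_MASK, req->way_mask);
    qints_cfg = BIT_ULL(36) | FIELD_PREP(QINTS_CFG_WAY_MASK, req->way_mask);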
@@ -369,6 +425,10 @@ exit:
rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
rsp->stack_pg_bytes = cfg & 0xFF;
rsp->qints = (cfg >> 28) & 0xFFF;
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ rsp->cache_lines = (cfg >> 1) & 0x3F;
+ }
return rc;
}
@@ -404,6 +464,23 @@ int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static void npa_aq_ndc_config(struct rvu *rvu, struct rvu_block *block)
+{
+ u64 cfg;
+
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */
+ return;
+
+ /* Do not bypass NDC cache */
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
+ cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+ /* Disable caching of stack pages */
+ cfg |= 0x10ULL;
+#endif
+ rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+}
+
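
For reference, the NPA_AF_NDC_CFG mask arithmetic written out bit by bit (positions only; the patch itself names just the stack-page behavior). static_assert() and BIT() are from <linux/build_bug.h> and <linux/bits.h>:

    /* 0x03D == 0b0011_1101: clearing it zeroes bits 0, 2, 3, 4 and 5,
     * i.e. no NDC bypass; 0x10 then sets bit 4 back, which per the
     * comment above disables caching of stack pages.
     */
    static_assert(0x03D == (BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5)));
    static_assert(0x010 == BIT(4));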
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
u64 cfg;
@@ -419,11 +496,15 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif
- /* Do not bypass NDC cache */
- cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
- cfg &= ~0x03DULL;
- rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
+ npa_aq_ndc_config(rvu, block);
+ /* For CN10K NPA batch DMA, set 35 cache lines */
+ if (!is_rvu_otx2(rvu)) {
+ cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
+ cfg &= ~0x7EULL;
+ cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
+ rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
+ }
/* Result structure can be followed by Aura/Pool context at
* RES + 128 bytes and a write mask at RES + 256 bytes, depending on
* operation type. Alloc sufficient result memory for all operations.
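
The BATCH_CTL write above encodes the line count in bits [6:1], the field cleared by ~0x7EULL: BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1) places 0b100011 = 35 there, matching both the "35 cache lines" comment and the rsp->cache_lines read earlier, (cfg >> 1) & 0x3F. A compile-time check of that arithmetic, for illustration:

    /* bits [6:1] of NPA_AF_BATCH_CTL hold the cache-line count */
    static_assert(((BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)) >> 1) == 35);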
@@ -443,18 +524,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_npa_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr, err;
+ int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return 0;
/* Initialize admin queue */
- err = npa_aq_init(rvu, &hw->block[blkaddr]);
- if (err)
- return err;
-
- return 0;
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
}
void rvu_npa_freemem(struct rvu *rvu)
@@ -487,3 +564,51 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
npa_ctx_free(rvu, pfvf);
}
+
+/* Due to a hardware erratum, in some corner cases, AQ context lock
+ * operations can leave an NDC way in an illegal state of being
+ * locked but not valid.
+ *
+ * This function works around the problem by clearing the lock bit of
+ * the NDC block. The operation needs to be done for each line of all
+ * the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+ int bank, max_bank, line, max_line, err;
+ u64 reg, ndc_af_const;
+
+ if (is_cn20k(rvu->pdev)) /* NDC not applicable to cn20k */
+ return 0;
+
+ /* Set the ENABLE bit(63) to '0' */
+ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+ /* Poll until the BUSY bits(47:32) are set to '0' */
+ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+ if (err) {
+ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+ return err;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ for (line = 0; line < max_line; line++) {
+ /* Check if the 'cache line valid' bit(63) is not set
+ * while the 'cache line lock' bit(60) is set, and if
+ * so, clear the lock bit(60).
+ */
+ reg = rvu_read64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line));
+ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+ rvu_write64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line),
+ reg & ~BIT_ULL(60));
+ }
+ }
+ }
+
+ return 0;
+}
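
The per-line test above pairs the valid bit (63) against the lock bit (60): a line that is locked but not valid is exactly the illegal state described in the comment, and clearing bit 60 recovers it. The test condensed into a predicate (hypothetical helper, illustration only):

    static bool ndc_line_needs_unlock(u64 metadata)
    {
            return !(metadata & BIT_ULL(63)) &&  /* not valid */
                    (metadata & BIT_ULL(60));    /* but locked */
    }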