Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c  162
1 file changed, 89 insertions(+), 73 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 1b08896b46d2..3e1bf22cba69 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
*
- * Copyright (C) 2020 Marvell.
*/
#include "cn10k.h"
@@ -13,6 +14,9 @@ static struct dev_hw_ops otx2_hw_ops = {
.sqe_flush = otx2_sqe_flush,
.aura_freeptr = otx2_aura_freeptr,
.refill_pool_ptrs = otx2_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
static struct dev_hw_ops cn10k_hw_ops = {
@@ -20,81 +24,72 @@ static struct dev_hw_ops cn10k_hw_ops = {
.sqe_flush = cn10k_sqe_flush,
.aura_freeptr = cn10k_aura_freeptr,
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
+ .aura_aq_init = otx2_aura_aq_init,
+ .pool_aq_init = otx2_pool_aq_init,
};
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+void otx2_init_hw_ops(struct otx2_nic *pfvf)
{
- int size, num_lines;
- u64 base;
-
- if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- pf->hw_ops = &otx2_hw_ops;
- return 0;
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return;
}
- pf->hw_ops = &cn10k_hw_ops;
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- (MBOX_SIZE * (pf->total_vfs + 1));
+ pfvf->hw_ops = &cn10k_hw_ops;
+}
+EXPORT_SYMBOL(otx2_init_hw_ops);
- size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
- (MBOX_SIZE * (pf->total_vfs + 1));
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
+{
- pf->hw.lmt_base = ioremap(base, size);
+ struct lmtst_tbl_setup_req *req;
+ struct otx2_lmt_info *lmt_info;
+ int err, cpu;
- if (!pf->hw.lmt_base) {
- dev_err(pf->dev, "Unable to map PF LMTST region\n");
- return -ENOMEM;
- }
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
+ return 0;
- /* FIXME: Get the num of LMTST lines from LMT table */
- pf->tot_lmt_lines = size / LMT_LINE_SIZE;
- num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
- pf->hw.tx_queues;
- /* Number of LMT lines per SQ queues */
- pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
+ /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
+ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
+ pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
- pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
- return 0;
-}
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
- int size, num_lines;
+ req->use_local_lmt_region = true;
- if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
- vf->hw_ops = &otx2_hw_ops;
- return 0;
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
}
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
- vf->hw_ops = &cn10k_hw_ops;
- size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
- vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
- PCI_MBOX_BAR_NUM),
- size);
- if (!vf->hw.lmt_base) {
- dev_err(vf->dev, "Unable to map VF LMTST region\n");
- return -ENOMEM;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ for_each_possible_cpu(cpu) {
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
+ lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
+ (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
+ lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
}
- vf->tot_lmt_lines = size / LMT_LINE_SIZE;
- /* LMTST lines per SQ */
- num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
- vf->hw.tx_queues;
- vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
- vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
- struct otx2_snd_queue *sq;
-
- sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
- (qidx * pfvf->nix_lmt_size));
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
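The per-CPU carving in cn10k_lmtst_init() above is easier to follow with concrete numbers: each CPU owns a contiguous burst of LMT lines, so its base address and LMT_ID are both simple multiples of the CPU index. A standalone sketch, assuming the usual constants from the driver headers (LMT_LINE_SIZE of 128 bytes, LMT_BURST_SIZE of 32 lines; neither definition appears in this diff):

#include <stdint.h>
#include <stdio.h>

#define LMT_LINE_SIZE  128	/* bytes per LMT line (assumed) */
#define LMT_BURST_SIZE 32	/* LMT lines reserved per CPU (assumed) */

int main(void)
{
	uint64_t lmt_base = 0x100000;	/* stand-in for pfvf->dync_lmt->base */
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		/* Mirrors the for_each_possible_cpu() body above */
		uint64_t addr = lmt_base +
				(uint64_t)cpu * LMT_BURST_SIZE * LMT_LINE_SIZE;
		int id = cpu * LMT_BURST_SIZE;

		printf("cpu%d: lmt_id=%d lmt_addr=0x%jx\n", cpu, id,
		       (uintmax_t)addr);
	}
	return 0;
}

So CPU 1 gets LMT_ID 32 at a 4 KB offset, CPU 2 gets LMT_ID 64 at 8 KB, and so on; tot_lmt_lines = num_online_cpus() * 32 sizes the qmem allocation to exactly cover those bursts.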
@@ -105,11 +100,9 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- /* FIXME: set based on NIX_AF_DWRR_RPM_MTU*/
- aq->sq.smq_rr_weight = pfvf->netdev->mtu;
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
+ aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
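The smq_rr_weight change above replaces the old FIXME (which used netdev->mtu directly) with mtu_to_dwrr_weight() applied to the maximum packet length. A sketch of what that conversion plausibly does, assuming the macro in otx2_common.h amounts to a round-up division by the AF-programmed DWRR MTU (this definition is an assumption, not part of this diff):

/* Assumed shape of mtu_to_dwrr_weight(): express the packet length
 * in DWRR-MTU units, rounding up so a partial unit still costs a slot.
 */
static inline unsigned int pktlen_to_dwrr_weight(unsigned int pktlen,
						 unsigned int dwrr_mtu)
{
	return pktlen / dwrr_mtu + (pktlen % dwrr_mtu ? 1 : 0);
}

For example, pktlen = 1518 with dwrr_mtu = 1024 yields a weight of 2, so larger frames consume proportionally more of the SMQ's round-robin bandwidth.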
@@ -128,52 +121,58 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
}
#define NPA_MAX_BURST 16
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
- int num_ptrs = 1;
+ struct otx2_pool *pool;
dma_addr_t bufptr;
+ int num_ptrs = 1;
+
+ pool = &pfvf->qset.pool[cq->cq_idx];
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
if (num_ptrs--)
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
- num_ptrs,
- cq->rbpool->lmt_addr);
+ num_ptrs);
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
- num_ptrs,
- cq->rbpool->lmt_addr);
+ num_ptrs);
num_ptrs = 1;
}
}
+ return cnt - cq->pool_ptrs;
}
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
+ struct otx2_lmt_info *lmt_info;
struct otx2_nic *pfvf = dev;
- int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (lmt_id & 0x7FF);
+ val = (lmt_info->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
*/
tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
dma_wmb();
- memcpy(sq->lmt_addr, sq->sqe_base, size);
+ memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
cn10k_lmt_flush(val, tar_addr);
sq->head++;
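The bit layout in the comments above fully determines val and tar_addr for the single-LMTST case that cn10k_sqe_flush() issues. A standalone sketch of that encoding (layout taken from the comments in this diff; not driver code):

#include <stdint.h>

static void encode_lmt_flush(uint16_t lmt_id, uint64_t io_addr,
			     int sqe_bytes, uint64_t *val, uint64_t *tar_addr)
{
	/* val[0:10] = LMT_ID; val[12:15] (burst count - 1) and
	 * val[19:63] (per-LMTST sizes) stay 0 for a single LMTST.
	 */
	*val = lmt_id & 0x7FF;

	/* tar_addr[6:4] = size of the first LMTST minus one, in units
	 * of 128-bit words: a 128-byte SQE is eight words, so 7.
	 */
	*tar_addr = io_addr | (uint64_t)(((sqe_bytes / 16) - 1) & 0x7) << 4;
}

With lmt_id = 32 and a 128-byte SQE this produces val = 0x20 and tar_addr = io_addr | 0x70, matching the expressions in the function body.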
@@ -223,6 +222,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
rsp = (struct nix_bandprof_alloc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ rc = PTR_ERR(rsp);
+ goto out;
+ }
+
if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
rc = -EIO;
goto out;
@@ -337,6 +341,12 @@ int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
aq->rq.band_prof_id = policer;
aq->rq_mask.band_prof_id = GENMASK(9, 0);
+ /* If policer id is greater than 1023 then it implies hardware supports
+ * more leaf profiles. In that case use band_prof_id_h for 4 MSBs.
+ */
+ aq->rq.band_prof_id_h = policer >> 10;
+ aq->rq_mask.band_prof_id_h = GENMASK(3, 0);
+
/* Fill AQ info */
aq->qidx = rq_idx;
aq->ctype = NIX_AQ_CTYPE_RQ;
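The band_prof_id_h addition above widens the usable policer index from 10 to 14 bits. The split is a plain bitfield decomposition, sketched standalone (field names mirror the RQ context fields; the example value is illustrative):

#include <stdint.h>

static void split_policer_id(uint16_t policer, uint16_t *band_prof_id,
			     uint8_t *band_prof_id_h)
{
	*band_prof_id   = policer & 0x3FF;	/* low 10 bits, GENMASK(9, 0) */
	*band_prof_id_h = policer >> 10;	/* high 4 bits, GENMASK(3, 0) */
	/* e.g. policer = 1200 -> band_prof_id = 176, band_prof_id_h = 1 */
}

Hardware recombines the two fields, so profiles beyond 1023 become addressable on silicon that supports the wider leaf-profile space.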
@@ -367,9 +377,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
- for (qidx = 0; qidx < hw->rx_queues; qidx++)
- cn10k_map_unmap_rq_policer(pfvf, qidx,
- hw->matchall_ipolicer, false);
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
+ if (rc)
+ dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
+ qidx, rc);
+ }
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
@@ -470,6 +483,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
+ aq->prof.hl_en = 0;
+ aq->prof_mask.hl_en = 1;
+
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;