Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c')
-rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 5025
1 file changed, 4359 insertions(+), 666 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4a7609fd6dd0..2f485a930edd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -15,9 +12,26 @@ #include "rvu_reg.h" #include "rvu.h" #include "npc.h" +#include "mcs.h" #include "cgx.h" - -static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add); +#include "lmac_common.h" +#include "rvu_npc_hash.h" + +static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); +static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, + int type, int chan_id); +static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, + int type, bool add); +static int nix_setup_ipolicers(struct rvu *rvu, + struct nix_hw *nix_hw, int blkaddr); +static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); +static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, + struct nix_hw *nix_hw, u16 pcifunc); +static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); +static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, + u32 leaf_prof); +static const char *nix_get_ctx_name(int ctype); +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -59,15 +73,38 @@ enum nix_makr_fmt_indexes { /* For now considering MC resources needed for broadcast * pkt replication only. i.e 256 HWVFs + 12 PFs. 
*/ -#define MC_TBL_SIZE MC_TBL_SZ_512 -#define MC_BUF_CNT MC_BUF_CNT_128 +#define MC_TBL_SIZE MC_TBL_SZ_2K +#define MC_BUF_CNT MC_BUF_CNT_1024 + +#define MC_TX_MAX 2048 struct mce { struct hlist_node node; - u16 idx; + u32 rq_rss_index; u16 pcifunc; + u16 channel; + u8 dest_type; + u8 is_active; + u8 reserved[2]; }; +int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) +{ + int i = 0; + + /*If blkaddr is 0, return the first nix block address*/ + if (blkaddr == 0) + return rvu->nix_blkaddr[blkaddr]; + + while (i + 1 < MAX_NIX_BLKS) { + if (rvu->nix_blkaddr[i] == blkaddr) + return rvu->nix_blkaddr[i + 1]; + i++; + } + + return 0; +} + bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -81,14 +118,52 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) int rvu_get_nixlf_count(struct rvu *rvu) { + int blkaddr = 0, max = 0; struct rvu_block *block; + + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + block = &rvu->hw->block[blkaddr]; + max += block->lf.max; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } + return max; +} + +int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; int blkaddr; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); - if (blkaddr < 0) - return 0; - block = &rvu->hw->block[blkaddr]; - return block->lf.max; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (*nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (nix_blkaddr) + *nix_blkaddr = blkaddr; + + return 0; +} + +int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc, + struct nix_hw **nix_hw, int *blkaddr) +{ + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || *blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + *nix_hw = get_nix_hw(rvu->hw, *blkaddr); + if (!*nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + return 0; } static void nix_mce_list_init(struct nix_mce_list *list, int max) @@ -98,46 +173,127 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max) list->max = max; } -static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) +static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir) { + struct rsrc_bmap *mce_counter; int idx; if (!mcast) - return 0; + return -EINVAL; + + mce_counter = &mcast->mce_counter[dir]; + if (!rvu_rsrc_check_contig(mce_counter, count)) + return -ENOSPC; - idx = mcast->next_free_mce; - mcast->next_free_mce += count; + idx = rvu_alloc_rsrc_contig(mce_counter, count); return idx; } -static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) +static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir) { - if (blkaddr == BLKADDR_NIX0 && hw->nix0) - return hw->nix0; + struct rsrc_bmap *mce_counter; + + if (!mcast) + return; + mce_counter = &mcast->mce_counter[dir]; + rvu_free_rsrc_contig(mce_counter, count, start); +} + +struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) +{ + int nix_blkaddr = 0, i = 0; + struct rvu *rvu = hw->rvu; + + nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); + while (nix_blkaddr) { + if (blkaddr == nix_blkaddr && hw->nix) + return &hw->nix[i]; + nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); + i++; + } return NULL; } 
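/* Illustrative sketch (not part of the applied patch): a minimal,
 * self-contained model of how the new rvu_get_next_nix_blkaddr() iterator
 * introduced above is meant to be consumed -- start from blkaddr 0 and keep
 * asking for the next NIX block until 0 comes back, which is the pattern
 * rvu_get_nixlf_count() follows. The types, names and block addresses below
 * (fake_rvu, fake_get_next_nix_blkaddr, the values 4 and 5) are simplified
 * stand-ins, not the real driver definitions.
 */
#include <stdio.h>

#define MAX_NIX_BLKS 2

struct fake_rvu {
	int nix_blkaddr[MAX_NIX_BLKS];	/* e.g. NIX0 and NIX1 block addresses */
};

static int fake_get_next_nix_blkaddr(struct fake_rvu *rvu, int blkaddr)
{
	int i;

	/* First call: return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[0];

	/* Otherwise return the block that follows the one passed in */
	for (i = 0; i + 1 < MAX_NIX_BLKS; i++)
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];

	return 0;	/* no more NIX blocks */
}

int main(void)
{
	struct fake_rvu rvu = { .nix_blkaddr = { 4, 5 } };	/* hypothetical addresses */
	int blkaddr = 0;

	/* Walk every NIX block, the same loop shape rvu_get_nixlf_count() uses */
	while ((blkaddr = fake_get_next_nix_blkaddr(&rvu, blkaddr)) != 0)
		printf("NIX block at blkaddr %d\n", blkaddr);

	return 0;
}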
+int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type) +{ + if (hw->cap.nix_multiple_dwrr_mtu) + return NIX_AF_DWRR_MTUX(smq_link_type); + + if (smq_link_type == SMQ_LINK_TYPE_SDP) + return NIX_AF_DWRR_SDP_MTU; + + /* Here it's same reg for RPM and LBK */ + return NIX_AF_DWRR_RPM_MTU; +} + +u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) +{ + dwrr_mtu &= 0x1FULL; + + /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. + * Value of 4 is reserved for MTU value of 9728 bytes. + * Value of 5 is reserved for MTU value of 10240 bytes. + */ + switch (dwrr_mtu) { + case 4: + return 9728; + case 5: + return 10240; + default: + return BIT_ULL(dwrr_mtu); + } + + return 0; +} + +u32 convert_bytes_to_dwrr_mtu(u32 bytes) +{ + /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. + * Value of 4 is reserved for MTU value of 9728 bytes. + * Value of 5 is reserved for MTU value of 10240 bytes. + */ + if (bytes > BIT_ULL(16)) + return 0; + + switch (bytes) { + case 9728: + return 4; + case 10240: + return 5; + default: + return ilog2(bytes); + } + + return 0; +} + static void nix_rx_sync(struct rvu *rvu, int blkaddr) { int err; - /*Sync all in flight RX packets to LLC/DRAM */ + /* Sync all in flight RX packets to LLC/DRAM */ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); if (err) - dev_err(rvu->dev, "NIX RX software sync failed\n"); + dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n"); - /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA] - * bit too early. Hence wait for 50us more. + /* SW_SYNC ensures all existing transactions are finished and pkts + * are written to LLC/DRAM, queues should be teared down after + * successful SW_SYNC. Due to a HW errata, in some rare scenarios + * an existing transaction might end after SW_SYNC operation. To + * ensure operation is fully done, do the SW_SYNC twice. 
*/ - if (is_rvu_9xxx_A0(rvu)) - usleep_range(50, 60); + rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); + if (err) + dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n"); } static bool is_valid_txschq(struct rvu *rvu, int blkaddr, int lvl, u16 pcifunc, u16 schq) { + struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; u16 map_func; @@ -155,27 +311,38 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); mutex_unlock(&rvu->rsrc_lock); - /* For TL1 schq, sharing across VF's of same PF is ok */ - if (lvl == NIX_TXSCH_LVL_TL1 && - rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) - return false; + /* TLs aggegating traffic are shared across PF and VFs */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + if ((nix_get_tx_link(rvu, map_func) != + nix_get_tx_link(rvu, pcifunc)) && + (rvu_get_pf(rvu->pdev, map_func) != + rvu_get_pf(rvu->pdev, pcifunc))) + return false; + else + return true; + } - if (lvl != NIX_TXSCH_LVL_TL1 && - map_func != pcifunc) + if (map_func != pcifunc) return false; return true; } -static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) +static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, + struct nix_lf_alloc_rsp *rsp, bool loop) { - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); + u16 req_chan_base, req_chan_end, req_chan_cnt; + struct rvu_hwinfo *hw = rvu->hw; + struct sdp_node_info *sdp_info; + int pkind, pf, vf, lbkid, vfid; u8 cgx_id, lmac_id; - int pkind, pf, vf; + bool from_vf; int err; - pf = rvu_get_pf(pcifunc); - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + pf = rvu_get_pf(rvu->pdev, pcifunc); + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; switch (type) { @@ -189,22 +356,102 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) "PF_Func 0x%x: Invalid pkind\n", pcifunc); return -EINVAL; } - pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); + pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; - pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf); - pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) : - NIX_CHAN_LBK_CHX(0, vf + 1); + + /* If NIX1 block is present on the silicon then NIXes are + * assigned alternatively for lbk interfaces. NIX0 should + * send packets on lbk link 1 channels and NIX1 should send + * on lbk link 0 channels for the communication between + * NIX0 and NIX1. + */ + lbkid = 0; + if (rvu->hw->lbk_links > 1) + lbkid = vf & 0x1 ? 0 : 1; + + /* By default NIX0 is configured to send packet on lbk link 1 + * (which corresponds to LBK1), same packet will receive on + * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 + * (which corresponds to LBK2) packet will receive on NIX0 lbk + * link 1. 
+ * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0 + * transmits and receives on lbk link 0, whick corresponds + * to LBK1 block, back to back connectivity between NIX and + * LBK can be achieved (which is similar to 96xx) + * + * RX TX + * NIX0 lbk link 1 (LBK2) 1 (LBK1) + * NIX0 lbk link 0 (LBK0) 0 (LBK0) + * NIX1 lbk link 0 (LBK1) 0 (LBK2) + * NIX1 lbk link 1 (LBK3) 1 (LBK3) + */ + if (loop) + lbkid = !lbkid; + + /* Note that AF's VFs work in pairs and talk over consecutive + * loopback channels.Therefore if odd number of AF VFs are + * enabled then the last VF remains with no pair. + */ + pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); + pfvf->tx_chan_base = vf & 0x1 ? + rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : + rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = hw->cgx_links + lbkid; + pfvf->lbkid = lbkid; + rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, - pfvf->rx_chan_base, false); + pfvf->rx_chan_base, + pfvf->rx_chan_cnt); + + break; + case NIX_INTF_TYPE_SDP: + from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; + sdp_info = parent_pf->sdp_info; + if (!sdp_info) { + dev_err(rvu->dev, "Invalid sdp_info pointer\n"); + return -EINVAL; + } + if (from_vf) { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + + sdp_info->num_pf_rings; + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + for (vfid = 0; vfid < vf; vfid++) + req_chan_base += sdp_info->vf_rings[vfid]; + req_chan_cnt = sdp_info->vf_rings[vf]; + req_chan_end = req_chan_base + req_chan_cnt - 1; + if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || + req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { + dev_err(rvu->dev, + "PF_Func 0x%x: Invalid channel base and count\n", + pcifunc); + return -EINVAL; + } + } else { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; + req_chan_cnt = sdp_info->num_pf_rings; + } + + pfvf->rx_chan_base = req_chan_base; + pfvf->rx_chan_cnt = req_chan_cnt; + pfvf->tx_chan_base = pfvf->rx_chan_base; + pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; + + rsp->tx_link = hw->cgx_links + hw->lbk_links; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, + pfvf->rx_chan_cnt); break; } @@ -215,16 +462,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->rx_chan_base, pfvf->mac_addr); /* Add this PF_FUNC to bcast pkt replication list */ - err = nix_update_bcast_mce_list(rvu, pcifunc, true); + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true); if (err) { dev_err(rvu->dev, "Bcast list, failed to enable PF_FUNC 0x%x\n", pcifunc); return err; } - + /* Install MCAM rule matching Ethernet broadcast mac address */ rvu_npc_install_bcast_match_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base); + pfvf->maxlen = NIC_HW_MIN_FRS; pfvf->minlen = NIC_HW_MIN_FRS; @@ -238,10 +486,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) pfvf->maxlen = 0; pfvf->minlen = 0; - pfvf->rxvlan = false; /* Remove this PF_FUNC from bcast pkt replication list */ - err = nix_update_bcast_mce_list(rvu, pcifunc, false); + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false); if (err) { dev_err(rvu->dev, "Bcast list, failed to disable PF_FUNC 0x%x\n", @@ -250,6 +497,306 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + + /* Disable DMAC 
filters used */ + rvu_cgx_disable_dmac_entries(rvu, pcifunc); +} + +#define NIX_BPIDS_PER_LMAC 8 +#define NIX_BPIDS_PER_CPT 1 +static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr) +{ + struct nix_bp *bp = &hw->bp; + int err, max_bpids; + u64 cfg; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg); + + /* Reserve the BPIds for CGX and SDP */ + bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC; + bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg); + bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt + + NIX_BPIDS_PER_CPT; + bp->bpids.max = max_bpids - bp->free_pool_base; + + err = rvu_alloc_bitmap(&bp->bpids); + if (err) + return err; + + bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u16), GFP_KERNEL); + if (!bp->fn_map) + return -ENOMEM; + + bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u8), GFP_KERNEL); + if (!bp->intf_map) + return -ENOMEM; + + bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u8), GFP_KERNEL); + if (!bp->ref_cnt) + return -ENOMEM; + + return 0; +} + +void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, bpid, err; + struct nix_hw *nix_hw; + struct nix_bp *bp; + + if (!is_lbk_vf(rvu, pcifunc)) + return; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return; + + bp = &nix_hw->bp; + + mutex_lock(&rvu->rsrc_lock); + for (bpid = 0; bpid < bp->bpids.max; bpid++) { + if (bp->fn_map[bpid] == pcifunc) { + bp->ref_cnt[bpid]--; + if (bp->ref_cnt[bpid]) + continue; + rvu_free_rsrc(&bp->bpids, bpid); + bp->fn_map[bpid] = 0; + } + } + mutex_unlock(&rvu->rsrc_lock); +} + +static u16 nix_get_channel(u16 chan, bool cpt_link) +{ + /* CPT channel for a given link channel is always + * assumed to be BIT(11) set in link channel. + */ + return cpt_link ? chan | BIT(11) : chan; +} + +static int nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp, bool cpt_link) +{ + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, pf, type, err; + u16 chan_base, chan, bpid; + struct rvu_pfvf *pfvf; + struct nix_hw *nix_hw; + struct nix_bp *bp; + u16 chan_v; + u64 cfg; + + pf = rvu_get_pf(rvu->pdev, pcifunc); + type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + return 0; + + if (is_sdp_pfvf(rvu, pcifunc)) + type = NIX_INTF_TYPE_SDP; + + if (cpt_link && !rvu->hw->cpt_links) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + bp = &nix_hw->bp; + chan_base = pfvf->rx_chan_base + req->chan_base; + for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { + chan_v = nix_get_channel(chan, cpt_link); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), + cfg & ~BIT_ULL(16)); + + if (type == NIX_INTF_TYPE_LBK) { + bpid = cfg & GENMASK(8, 0); + mutex_lock(&rvu->rsrc_lock); + rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base); + for (bpid = 0; bpid < bp->bpids.max; bpid++) { + if (bp->fn_map[bpid] == pcifunc) { + bp->fn_map[bpid] = 0; + bp->ref_cnt[bpid] = 0; + } + } + mutex_unlock(&rvu->rsrc_lock); + } + } + return 0; +} + +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, true); +} + +static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, + int type, int chan_id) +{ + int bpid, blkaddr, sdp_chan_base, err; + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_pfvf *pfvf; + struct nix_hw *nix_hw; + u8 cgx_id, lmac_id; + struct nix_bp *bp; + + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + bp = &nix_hw->bp; + + /* Backpressure IDs range division + * CGX channles are mapped to (0 - 191) BPIDs + * LBK channles are mapped to (192 - 255) BPIDs + * SDP channles are mapped to (256 - 511) BPIDs + * + * Lmac channles and bpids mapped as follows + * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15) + * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) .... + * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) .... 
+ */ + switch (type) { + case NIX_INTF_TYPE_CGX: + if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC) + return NIX_AF_ERR_INVALID_BPID_REQ; + rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); + /* Assign bpid based on cgx, lmac and chan id */ + bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) + + (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base; + + if (req->bpid_per_chan) + bpid += chan_id; + if (bpid > bp->cgx_bpid_cnt) + return NIX_AF_ERR_INVALID_BPID; + break; + + case NIX_INTF_TYPE_LBK: + /* Alloc bpid from the free pool */ + mutex_lock(&rvu->rsrc_lock); + bpid = rvu_alloc_rsrc(&bp->bpids); + if (bpid < 0) { + mutex_unlock(&rvu->rsrc_lock); + return NIX_AF_ERR_INVALID_BPID; + } + bp->fn_map[bpid] = req->hdr.pcifunc; + bp->ref_cnt[bpid]++; + bpid += bp->free_pool_base; + mutex_unlock(&rvu->rsrc_lock); + break; + case NIX_INTF_TYPE_SDP: + if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt) + return NIX_AF_ERR_INVALID_BPID_REQ; + + /* Handle usecase of 2 SDP blocks */ + if (!hw->cap.programmable_chans) + sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START; + else + sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base; + + bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base; + if (req->bpid_per_chan) + bpid += chan_id; + + if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt)) + return NIX_AF_ERR_INVALID_BPID; + break; + default: + return -EINVAL; + } + return bpid; +} + +static int nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp, + bool cpt_link) +{ + int blkaddr, pf, type, chan_id = 0; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + u16 chan_base, chan; + s16 bpid, bpid_base; + u16 chan_v; + u64 cfg; + + pf = rvu_get_pf(rvu->pdev, pcifunc); + type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_sdp_pfvf(rvu, pcifunc)) + type = NIX_INTF_TYPE_SDP; + + /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) + return 0; + + if (cpt_link && !rvu->hw->cpt_links) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + + bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id); + chan_base = pfvf->rx_chan_base + req->chan_base; + bpid = bpid_base; + + for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { + if (bpid < 0) { + dev_warn(rvu->dev, "Fail to enable backpressure\n"); + return -EINVAL; + } + + chan_v = nix_get_channel(chan, cpt_link); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); + cfg &= ~GENMASK_ULL(8, 0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), + cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); + chan_id++; + bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); + } + + for (chan = 0; chan < req->chan_cnt; chan++) { + /* Map channel and bpid assign to it */ + rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 | + (bpid_base & 0x3FF); + if (req->bpid_per_chan) + bpid_base++; + } + rsp->chan_cnt = req->chan_cnt; + + return 0; +} + +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, true); } static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, @@ -382,9 +929,11 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, - int rss_sz, int rss_grps, int hwctx_size) + int rss_sz, int rss_grps, int hwctx_size, + u64 way_mask, bool tag_lsb_as_adder) { int err, grp, num_indices; + u64 val; /* RSS is not requested for this NIXLF */ if (!rss_sz) @@ -400,9 +949,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, (u64)pfvf->rss_ctx->iova); /* Config full RSS table size, enable RSS and caching */ - rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), - BIT_ULL(36) | BIT_ULL(4) | - ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE)); + val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); + + if (tag_lsb_as_adder) + val |= BIT_ULL(5); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); /* Config RSS group offset and sizes */ for (grp = 0; grp < rss_grps; grp++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), @@ -417,6 +970,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, struct nix_aq_res_s *result; int timeout = 1000; u64 reg, head; + int ret; result = (struct nix_aq_res_s *)aq->res->base; @@ -440,15 +994,50 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, return -EBUSY; } - if (result->compcode != NIX_AQ_COMP_GOOD) + if (result->compcode != NIX_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ + if (result->compcode == NIX_AQ_COMP_CTX_FAULT || + result->compcode == NIX_AQ_COMP_LOCKERR || + result->compcode == NIX_AQ_COMP_CTX_POISON) { + ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); + ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); + ret |= rvu_ndc_fix_locked_cacheline(rvu, 
BLKADDR_NDC_NIX1_RX); + ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); + if (ret) + dev_err(rvu->dev, + "%s: Not able to unlock cachelines\n", __func__); + } + return -EBUSY; + } return 0; } -static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, - struct nix_aq_enq_rsp *rsp) +static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req, + u16 *smq, u16 *smq_mask) +{ + struct nix_cn10k_aq_enq_req *aq_req; + + if (is_cn20k(rvu->pdev)) { + *smq = ((struct nix_cn20k_aq_enq_req *)req)->sq.smq; + *smq_mask = ((struct nix_cn20k_aq_enq_req *)req)->sq_mask.smq; + return; + } + + if (!is_rvu_otx2(rvu)) { + aq_req = (struct nix_cn10k_aq_enq_req *)req; + *smq = aq_req->sq.smq; + *smq_mask = aq_req->sq_mask.smq; + } else { + *smq = req->sq.smq; + *smq_mask = req->sq_mask.smq; + } +} + +static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; @@ -457,14 +1046,12 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct rvu_block *block; struct admin_queue *aq; struct rvu_pfvf *pfvf; + u16 smq, smq_mask; void *ctx, *mask; bool ena; u64 cfg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - + blkaddr = nix_hw->blkaddr; block = &hw->block[blkaddr]; aq = block->aq; if (!aq) { @@ -475,8 +1062,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); nixlf = rvu_get_lf(rvu, block, pcifunc, 0); - /* Skip NIXLF check for broadcast MCE entry init */ - if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { + /* Skip NIXLF check for broadcast MCE entry and bandwidth profile + * operations done by AF itself. 
+ */ + if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) || + (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) { if (!pfvf->nixlf || nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; } @@ -504,8 +1094,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, break; case NIX_AQ_CTYPE_MCE: cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); + /* Check if index exceeds MCE list length */ - if (!hw->nix0->mcast.mce_ctx || + if (!nix_hw->mcast.mce_ctx || (req->qidx >= (256UL << (cfg & 0xF)))) rc = NIX_AF_ERR_AQ_ENQUEUE; @@ -515,6 +1106,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, if (rsp) rc = NIX_AF_ERR_AQ_ENQUEUE; break; + case NIX_AQ_CTYPE_BANDPROF: + if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req, + nix_hw, pcifunc)) + rc = NIX_AF_ERR_INVALID_BANDPROF; + break; default: rc = NIX_AF_ERR_AQ_ENQUEUE; } @@ -522,13 +1118,14 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, if (rc) return rc; + nix_get_aq_req_smq(rvu, req, &smq, &smq_mask); /* Check if SQ pointed SMQ belongs to this PF/VF or not */ if (req->ctype == NIX_AQ_CTYPE_SQ && ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || (req->op == NIX_AQ_INSTOP_WRITE && - req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { + req->sq_mask.ena && req->sq.ena && smq_mask))) { if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, - pcifunc, req->sq.smq)) + pcifunc, smq)) return NIX_AF_ERR_AQ_ENQUEUE; } @@ -542,6 +1139,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, */ inst.res_addr = (u64)aq->res->iova; + /* Hardware uses same aq->res->base for updating result of + * previous instruction hence wait here till it is done. + */ + spin_lock(&aq->lock); + /* Clean result + context memory */ memset(aq->res->base, 0, aq->res->entry_sz); /* Context needs to be written at RES_ADDR + 128 */ @@ -553,31 +1155,36 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, case NIX_AQ_INSTOP_WRITE: if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(mask, &req->rq_mask, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(mask, &req->sq_mask, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(mask, &req->cq_mask, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(mask, &req->rss_mask, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(mask, &req->mce_mask, - sizeof(struct nix_rx_mce_s)); - /* Fall through */ + NIX_MAX_CTX_SIZE); + else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) + memcpy(mask, &req->prof_mask, + NIX_MAX_CTX_SIZE); + fallthrough; case NIX_AQ_INSTOP_INIT: if (req->ctype == NIX_AQ_CTYPE_RQ) - memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s)); + memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) - memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s)); + memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) - memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s)); + memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) - memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s)); + memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) - memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s)); + memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE); + else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) + memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE); 
break; case NIX_AQ_INSTOP_NOP: case NIX_AQ_INSTOP_READ: @@ -586,11 +1193,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, break; default: rc = NIX_AF_ERR_AQ_ENQUEUE; + spin_unlock(&aq->lock); return rc; } - spin_lock(&aq->lock); - /* Submit the instruction to AQ */ rc = nix_aq_enqueue_wait(rvu, block, &inst); if (rc) { @@ -643,19 +1249,22 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, if (req->op == NIX_AQ_INSTOP_READ) { if (req->ctype == NIX_AQ_CTYPE_RQ) memcpy(&rsp->rq, ctx, - sizeof(struct nix_rq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_SQ) memcpy(&rsp->sq, ctx, - sizeof(struct nix_sq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_CQ) memcpy(&rsp->cq, ctx, - sizeof(struct nix_cq_ctx_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_RSS) memcpy(&rsp->rss, ctx, - sizeof(struct nix_rsse_s)); + NIX_MAX_CTX_SIZE); else if (req->ctype == NIX_AQ_CTYPE_MCE) memcpy(&rsp->mce, ctx, - sizeof(struct nix_rx_mce_s)); + NIX_MAX_CTX_SIZE); + else if (req->ctype == NIX_AQ_CTYPE_BANDPROF) + memcpy(&rsp->prof, ctx, + NIX_MAX_CTX_SIZE); } } @@ -663,6 +1272,113 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, return 0; } +static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, u8 ctype) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + int rc, word; + + if (req->ctype != NIX_AQ_CTYPE_CQ) + return 0; + + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, + req->hdr.pcifunc, ctype, req->qidx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", + __func__, nix_get_ctx_name(ctype), req->qidx, + req->hdr.pcifunc); + return rc; + } + + /* Make copy of original context & mask which are required + * for resubmission + */ + memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE); + memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE); + + /* exclude fields which HW can update */ + aq_req.cq_mask.cq_err = 0; + aq_req.cq_mask.wrptr = 0; + aq_req.cq_mask.tail = 0; + aq_req.cq_mask.head = 0; + aq_req.cq_mask.avg_level = 0; + aq_req.cq_mask.update_time = 0; + aq_req.cq_mask.substream = 0; + + /* Context mask (cq_mask) holds mask value of fields which + * are changed in AQ WRITE operation. + * for example cq.drop = 0xa; + * cq_mask.drop = 0xff; + * Below logic performs '&' between cq and cq_mask so that non + * updated fields are masked out for request and response + * comparison + */ + for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64); + word++) { + *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + *(u64 *)((u8 *)&aq_req.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + } + + if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE)) + return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; + + return 0; +} + +int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + struct nix_hw *nix_hw; + int err, retries = 5; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + +retry: + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); + + /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' + * As a work around perfrom CQ context read after each AQ write. 
If AQ + * read shows AQ write is not updated perform AQ write again. + */ + if (!err && req->op == NIX_AQ_INSTOP_WRITE) { + err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); + if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { + if (retries--) + goto retry; + else + return NIX_AF_ERR_CQ_CTX_WRITE_ERR; + } + } + + return err; +} + +static const char *nix_get_ctx_name(int ctype) +{ + switch (ctype) { + case NIX_AQ_CTYPE_CQ: + return "CQ"; + case NIX_AQ_CTYPE_SQ: + return "SQ"; + case NIX_AQ_CTYPE_RQ: + return "RQ"; + case NIX_AQ_CTYPE_RSS: + return "RSS"; + } + return ""; +} + static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); @@ -680,6 +1396,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) if (req->ctype == NIX_AQ_CTYPE_CQ) { aq_req.cq.ena = 0; aq_req.cq_mask.ena = 1; + aq_req.cq.bp_ena = 0; + aq_req.cq_mask.bp_ena = 1; q_cnt = pfvf->cq_ctx->qsize; bmap = pfvf->cq_bmap; } @@ -707,21 +1425,68 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) if (rc) { err = rc; dev_err(rvu->dev, "Failed to disable %s:%d context\n", - (req->ctype == NIX_AQ_CTYPE_CQ) ? - "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ? - "RQ" : "SQ"), qidx); + nix_get_ctx_name(req->ctype), qidx); } } return err; } +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING +static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req) +{ + struct nix_aq_enq_req lock_ctx_req; + int err; + + if (req->op != NIX_AQ_INSTOP_INIT) + return 0; + + if (req->ctype == NIX_AQ_CTYPE_MCE || + req->ctype == NIX_AQ_CTYPE_DYNO) + return 0; + + memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req)); + lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc; + lock_ctx_req.ctype = req->ctype; + lock_ctx_req.op = NIX_AQ_INSTOP_LOCK; + lock_ctx_req.qidx = req->qidx; + err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL); + if (err) + dev_err(rvu->dev, + "PFUNC 0x%x: Failed to lock NIX %s:%d context\n", + req->hdr.pcifunc, + nix_get_ctx_name(req->ctype), req->qidx); + return err; +} + +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + int err; + + err = rvu_nix_aq_enq_inst(rvu, req, rsp); + if (!err) + err = nix_lf_hwctx_lockdown(rvu, req); + return err; +} +#else + int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, req, rsp); } +#endif +/* CN10K mbox handler */ +int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, + struct nix_cn10k_aq_enq_req *req, + struct nix_cn10k_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, + (struct nix_aq_enq_rsp *)rsp); +} int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, @@ -745,6 +1510,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt) return NIX_AF_ERR_PARAM; + if (req->way_mask) + req->way_mask &= 0xFFFF; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (!pfvf->nixlf || blkaddr < 0) @@ -810,7 +1578,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, (u64)pfvf->rq_ctx->iova); /* Set caching and queue count in HW */ - cfg = BIT_ULL(36) | (req->rq_cnt - 1); + cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg); /* Alloc NIX SQ HW context memory and config the base */ @@ -825,7 +1593,8 @@ 
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf), (u64)pfvf->sq_ctx->iova); - cfg = BIT_ULL(36) | (req->sq_cnt - 1); + + cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg); /* Alloc NIX CQ HW context memory and config the base */ @@ -840,13 +1609,15 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf), (u64)pfvf->cq_ctx->iova); - cfg = BIT_ULL(36) | (req->cq_cnt - 1); + + cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20; rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg); /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); - err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, - req->rss_sz, req->rss_grps, hwctx_size); + err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, + req->rss_grps, hwctx_size, req->way_mask, + !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); if (err) goto free_mem; @@ -860,7 +1631,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), (u64)pfvf->cq_ints_ctx->iova); - rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36)); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); /* Alloc memory for QINT's HW contexts */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); @@ -872,7 +1645,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), (u64)pfvf->nix_qints_ctx->iova); - rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36)); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), + BIT_ULL(36) | req->way_mask << 20); /* Setup VLANX TPID's. * Use VLAN1 for 802.1Q @@ -896,14 +1670,33 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Config Rx pkt length, csum checks and apad enable / disable */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); - intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - err = nix_interface_init(rvu, pcifunc, intf, nixlf); + /* Configure pkind for TX parse config */ + cfg = NPC_TX_DEF_PKIND; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); + + if (is_rep_dev(rvu, pcifunc)) { + pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN; + pfvf->tx_chan_cnt = 1; + goto exit; + } + + intf = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_sdp_pfvf(rvu, pcifunc)) + intf = NIX_INTF_TYPE_SDP; + + err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, + !!(req->flags & NIX_LF_LBK_BLK_SEL)); if (err) goto free_mem; /* Disable NPC entries as NIXLF's contexts are not initialized yet */ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + /* Configure RX VTAG Type 7 (strip) for vf vlan */ + rvu_write64(rvu, blkaddr, + NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), + VTAGSIZE_T4 | VTAG_STRIP); + goto exit; free_mem: @@ -931,10 +1724,14 @@ exit: cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); rsp->qints = ((cfg >> 12) & 0xFFF); rsp->cints = ((cfg >> 24) & 0xFFF); + rsp->cgx_links = hw->cgx_links; + rsp->lbk_links = hw->lbk_links; + rsp->sdp_links = hw->sdp_links; + return rc; } -int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -953,8 +1750,21 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + if (is_rep_dev(rvu, pcifunc)) + goto free_lf; + + if (req->flags & NIX_LF_DISABLE_FLOWS) + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + else + rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); + + /* Free any tx vtag def entries used by this NIX LF */ + if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) + nix_free_tx_vtag_entries(rvu, pcifunc); + nix_interface_deinit(rvu, pcifunc, nixlf); +free_lf: /* Reset this NIX LF */ err = rvu_lf_reset(rvu, block, nixlf); if (err) { @@ -985,7 +1795,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; cfg = (((u32)req->offset & 0x7) << 16) | (((u32)req->y_mask & 0xF) << 12) | @@ -995,7 +1805,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); if (rc < 0) { dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return NIX_AF_ERR_MARK_CFG_FAIL; } @@ -1003,12 +1814,140 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, return 0; } +/* Handle shaper update specially for few revisions */ +static bool +handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, + int lvl, u64 reg, u64 regval) +{ + u64 regbase, oldval, sw_xoff = 0; + u64 dbgval, md_debug0 = 0; + unsigned long poll_tmo; + bool rate_reg = 0; + u32 schq; + + regbase = reg & 0xFFFF; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + + /* Check for rate register */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); + + rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); + break; + case NIX_TXSCH_LVL_TL2: + md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL3: + md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL4: + md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)); + break; + 
case NIX_TXSCH_LVL_MDQ: + sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); + rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)); + break; + } + + if (!rate_reg) + return false; + + /* Nothing special to do when state is not toggled */ + oldval = rvu_read64(rvu, blkaddr, reg); + if ((oldval & 0x1) == (regval & 0x1)) { + rvu_write64(rvu, blkaddr, reg, regval); + return true; + } + + /* PIR/CIR disable */ + if (!(regval & 0x1)) { + rvu_write64(rvu, blkaddr, sw_xoff, 1); + rvu_write64(rvu, blkaddr, reg, 0); + udelay(4); + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; + } + + /* PIR/CIR enable */ + rvu_write64(rvu, blkaddr, sw_xoff, 1); + if (md_debug0) { + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ + do { + if (time_after(jiffies, poll_tmo)) { + dev_err(rvu->dev, + "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", + nixlf, schq, lvl); + goto exit; + } + usleep_range(1, 5); + dbgval = rvu_read64(rvu, blkaddr, md_debug0); + } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); + } + rvu_write64(rvu, blkaddr, reg, regval); +exit: + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; +} + +static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + u64 tlx_parent = 0, tlx_schedule = 0; + + switch (lvl) { + case NIX_TXSCH_LVL_TL2: + tlx_parent = NIX_AF_TL2X_PARENT(schq); + tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq); + break; + case NIX_TXSCH_LVL_TL3: + tlx_parent = NIX_AF_TL3X_PARENT(schq); + tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq); + break; + case NIX_TXSCH_LVL_TL4: + tlx_parent = NIX_AF_TL4X_PARENT(schq); + tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq); + break; + case NIX_TXSCH_LVL_MDQ: + /* no need to reset SMQ_CFG as HW clears this CSR + * on SMQ flush + */ + tlx_parent = NIX_AF_MDQX_PARENT(schq); + tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq); + break; + default: + return; + } + + if (tlx_parent) + rvu_write64(rvu, blkaddr, tlx_parent, 0x0); + + if (tlx_schedule) + rvu_write64(rvu, blkaddr, tlx_schedule, 0x0); +} + /* Disable shaping of pkts by a scheduler queue * at a given scheduler level. */ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, - int lvl, int schq) + int nixlf, int lvl, int schq) { + struct rvu_hwinfo *hw = rvu->hw; u64 cir_reg = 0, pir_reg = 0; u64 cfg; @@ -1029,6 +1968,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, cir_reg = NIX_AF_TL4X_CIR(schq); pir_reg = NIX_AF_TL4X_PIR(schq); break; + case NIX_TXSCH_LVL_MDQ: + cir_reg = NIX_AF_MDQX_CIR(schq); + pir_reg = NIX_AF_MDQX_PIR(schq); + break; + } + + /* Shaper state toggle needs wait/poll */ + if (hw->cap.nix_shaper_toggle_wait) { + if (cir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, cir_reg, 0); + if (pir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, pir_reg, 0); + return; } if (!cir_reg) @@ -1046,13 +2000,19 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; + int link_level; int link; + if (lvl >= hw->cap.nix_tx_aggr_lvl) + return; + /* Reset TL4's SDP link config */ if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); - if (lvl != NIX_TXSCH_LVL_TL2) + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl != link_level) return; /* Reset TL2's CGX or LBK link config */ @@ -1061,173 +2021,295 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } -static int -rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc, - u16 *schq_list, u16 *schq_cnt) +static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, + int lvl, int schq) { - struct nix_txsch *txsch; - struct nix_hw *nix_hw; - struct rvu_pfvf *pfvf; - u8 cgx_id, lmac_id; - u16 schq_base; - u32 *pfvf_map; - int pf, intf; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return -ENODEV; - - pfvf = rvu_get_pfvf(rvu, pcifunc); - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; - pfvf_map = txsch->pfvf_map; - pf = rvu_get_pf(pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + u64 reg; - /* static allocation as two TL1's per link */ - intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + /* Skip this if shaping is not supported */ + if (!hw->cap.nix_shaping) + return; - switch (intf) { - case NIX_INTF_TYPE_CGX: - rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); - schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2; + /* Clear level specific SW_XOFF */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + reg = NIX_AF_TL1X_SW_XOFF(schq); break; - case NIX_INTF_TYPE_LBK: - schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2; + case NIX_TXSCH_LVL_TL2: + reg = NIX_AF_TL2X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL3: + reg = NIX_AF_TL3X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL4: + reg = NIX_AF_TL4X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_MDQ: + reg = NIX_AF_MDQX_SW_XOFF(schq); break; default: - return -ENODEV; + return; } - if (schq_base + 1 > txsch->schq.max) - return -ENODEV; + rvu_write64(rvu, blkaddr, reg, 0x0); +} - /* init pfvf_map as we store flags */ - if (pfvf_map[schq_base] == U32_MAX) { - pfvf_map[schq_base] = - TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0); - pfvf_map[schq_base + 1] = - TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0); +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(rvu->pdev, pcifunc); + u8 cgx_id = 0, lmac_id = 0; + + if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ + return hw->cgx_links; + } else if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return (cgx_id * hw->lmac_per_cgx) + lmac_id; + } - /* Onetime reset for TL1 */ - nix_reset_tx_linkcfg(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base); - nix_reset_tx_shaping(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base); + /* SDP link */ + return hw->cgx_links + hw->lbk_links; +} + +static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, + int link, int *start, int *end) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(rvu->pdev, pcifunc); + + /* LBK links */ + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { + *start = hw->cap.nix_txsch_per_cgx_lmac * link; + *end = *start + hw->cap.nix_txsch_per_lbk_lmac; + } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ + *start = hw->cap.nix_txsch_per_cgx_lmac * link; + *end = *start + hw->cap.nix_txsch_per_cgx_lmac; + } else { /* SDP link */ + *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) + + (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links); + *end = *start + hw->cap.nix_txsch_per_sdp_lmac; + } +} + +static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc, + struct nix_hw *nix_hw, + struct nix_txsch_alloc_req *req) +{ + struct 
rvu_hwinfo *hw = rvu->hw; + int schq, req_schq, free_cnt; + struct nix_txsch *txsch; + int link, start, end; + + txsch = &nix_hw->txsch[lvl]; + req_schq = req->schq_contig[lvl] + req->schq[lvl]; + + if (!req_schq) + return 0; - nix_reset_tx_linkcfg(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base + 1); - nix_reset_tx_shaping(rvu, blkaddr, - NIX_TXSCH_LVL_TL1, schq_base + 1); + link = nix_get_tx_link(rvu, pcifunc); + + /* For traffic aggregating scheduler level, one queue is enough */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + if (req_schq != 1) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + return 0; } - if (schq_list && schq_cnt) { - schq_list[0] = schq_base; - schq_list[1] = schq_base + 1; - *schq_cnt = 2; + /* Get free SCHQ count and check if request can be accomodated */ + if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + schq = start + (pcifunc & RVU_PFVF_FUNC_MASK); + if (end <= txsch->schq.max && schq < end && + !test_bit(schq, txsch->schq.bmap)) + free_cnt = 1; + else + free_cnt = 0; + } else { + free_cnt = rvu_rsrc_free_count(&txsch->schq); } + if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC || + req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + + /* If contiguous queues are needed, check for availability */ + if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] && + !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) + return NIX_AF_ERR_TLX_ALLOC_FAIL; + return 0; } +static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch, + struct nix_txsch_alloc_rsp *rsp, + int lvl, int start, int end) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = rsp->hdr.pcifunc; + int idx, schq; + + /* For traffic aggregating levels, queue alloc is based + * on transmit link to which PF_FUNC is mapped to. + */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + /* A single TL queue is allocated */ + if (rsp->schq_contig[lvl]) { + rsp->schq_contig[lvl] = 1; + rsp->schq_contig_list[lvl][0] = start; + } + + /* Both contig and non-contig reqs doesn't make sense here */ + if (rsp->schq_contig[lvl]) + rsp->schq[lvl] = 0; + + if (rsp->schq[lvl]) { + rsp->schq[lvl] = 1; + rsp->schq_list[lvl][0] = start; + } + return; + } + + /* Adjust the queue request count if HW supports + * only one queue per level configuration. 
+ */ + if (hw->cap.nix_fixed_txschq_mapping) { + idx = pcifunc & RVU_PFVF_FUNC_MASK; + schq = start + idx; + if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) { + rsp->schq_contig[lvl] = 0; + rsp->schq[lvl] = 0; + return; + } + + if (rsp->schq_contig[lvl]) { + rsp->schq_contig[lvl] = 1; + set_bit(schq, txsch->schq.bmap); + rsp->schq_contig_list[lvl][0] = schq; + rsp->schq[lvl] = 0; + } else if (rsp->schq[lvl]) { + rsp->schq[lvl] = 1; + set_bit(schq, txsch->schq.bmap); + rsp->schq_list[lvl][0] = schq; + } + return; + } + + /* Allocate contiguous queue indices requesty first */ + if (rsp->schq_contig[lvl]) { + schq = bitmap_find_next_zero_area(txsch->schq.bmap, + txsch->schq.max, start, + rsp->schq_contig[lvl], 0); + if (schq >= end) + rsp->schq_contig[lvl] = 0; + for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) { + set_bit(schq, txsch->schq.bmap); + rsp->schq_contig_list[lvl][idx] = schq; + schq++; + } + } + + /* Allocate non-contiguous queue indices */ + if (rsp->schq[lvl]) { + idx = 0; + for (schq = start; schq < end; schq++) { + if (!test_bit(schq, txsch->schq.bmap)) { + set_bit(schq, txsch->schq.bmap); + rsp->schq_list[lvl][idx++] = schq; + } + if (idx == rsp->schq[lvl]) + break; + } + /* Update how many were allocated */ + rsp->schq[lvl] = idx; + } +} + int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp) { + struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; + int link, blkaddr, rc = 0; + int lvl, idx, start, end; struct nix_txsch *txsch; - int lvl, idx, req_schq; - struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; - int blkaddr, rc = 0; u32 *pfvf_map; + int nixlf; u16 schq; - pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; mutex_lock(&rvu->rsrc_lock); - for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - txsch = &nix_hw->txsch[lvl]; - req_schq = req->schq_contig[lvl] + req->schq[lvl]; - pfvf_map = txsch->pfvf_map; - - if (!req_schq) - continue; - - /* There are only 28 TL1s */ - if (lvl == NIX_TXSCH_LVL_TL1) { - if (req->schq_contig[lvl] || - req->schq[lvl] > 2 || - rvu_get_tl1_schqs(rvu, blkaddr, - pcifunc, NULL, NULL)) - goto err; - continue; - } - - /* Check if request is valid */ - if (req_schq > MAX_TXSCHQ_PER_FUNC) - goto err; - - /* If contiguous queues are needed, check for availability */ - if (req->schq_contig[lvl] && - !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl])) - goto err; - /* Check if full request can be accommodated */ - if (req_schq >= rvu_rsrc_free_count(&txsch->schq)) + /* Check if request is valid as per HW capabilities + * and can be accomodated. 
+ */ + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req); + if (rc) goto err; } + /* Allocate requested Tx scheduler queues */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; - rsp->schq_contig[lvl] = req->schq_contig[lvl]; pfvf_map = txsch->pfvf_map; - rsp->schq[lvl] = req->schq[lvl]; if (!req->schq[lvl] && !req->schq_contig[lvl]) continue; - /* Handle TL1 specially as it is - * allocation is restricted to 2 TL1's - * per link - */ + rsp->schq[lvl] = req->schq[lvl]; + rsp->schq_contig[lvl] = req->schq_contig[lvl]; - if (lvl == NIX_TXSCH_LVL_TL1) { - rsp->schq_contig[lvl] = 0; - rvu_get_tl1_schqs(rvu, blkaddr, pcifunc, - &rsp->schq_list[lvl][0], - &rsp->schq[lvl]); - continue; + link = nix_get_tx_link(rvu, pcifunc); + + if (lvl >= hw->cap.nix_tx_aggr_lvl) { + start = link; + end = link; + } else if (hw->cap.nix_fixed_txschq_mapping) { + nix_get_txschq_range(rvu, pcifunc, link, &start, &end); + } else { + start = 0; + end = txsch->schq.max; } - /* Alloc contiguous queues first */ - if (req->schq_contig[lvl]) { - schq = rvu_alloc_rsrc_contig(&txsch->schq, - req->schq_contig[lvl]); + nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end); - for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + /* Reset queue config */ + for (idx = 0; idx < req->schq_contig[lvl]; idx++) { + schq = rsp->schq_contig_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); - nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); - rsp->schq_contig_list[lvl][idx] = schq; - schq++; - } + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); + nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); } - /* Alloc non-contiguous queues */ for (idx = 0; idx < req->schq[lvl]; idx++) { - schq = rvu_alloc_rsrc(&txsch->schq); - pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); + schq = rsp->schq_list[lvl][idx]; + if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) & + NIX_TXSCHQ_CFG_DONE)) + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); - rsp->schq_list[lvl][idx] = schq; + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); + nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); } } + + rsp->aggr_level = hw->cap.nix_tx_aggr_lvl; + rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO; + rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr, + NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; goto exit; err: rc = NIX_AF_ERR_TLX_ALLOC_FAIL; @@ -1236,13 +2318,220 @@ exit: return rc; } +static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, + struct nix_smq_flush_ctx *smq_flush_ctx) +{ + struct nix_smq_tree_ctx *smq_tree_ctx; + u64 parent_off, regval; + u16 schq; + int lvl; + + smq_flush_ctx->smq = smq; + + schq = smq; + for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { + smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; + smq_tree_ctx->schq = schq; + if (lvl == NIX_TXSCH_LVL_TL1) { + smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); + smq_tree_ctx->pir_off = 0; + smq_tree_ctx->pir_val = 0; + parent_off = 0; + } else if (lvl == NIX_TXSCH_LVL_TL2) { + smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); + smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); + parent_off = NIX_AF_TL2X_PARENT(schq); + } else if (lvl == NIX_TXSCH_LVL_TL3) { + smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq); + smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq); + parent_off = NIX_AF_TL3X_PARENT(schq); + } else if (lvl == NIX_TXSCH_LVL_TL4) { + smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq); + smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq); + parent_off = NIX_AF_TL4X_PARENT(schq); + } else if (lvl == NIX_TXSCH_LVL_MDQ) { + smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq); + smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq); + parent_off = NIX_AF_MDQX_PARENT(schq); + } + /* save cir/pir register values */ + smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off); + if (smq_tree_ctx->pir_off) + smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off); + + /* get parent txsch node */ + if (parent_off) { + regval = rvu_read64(rvu, blkaddr, parent_off); + schq = (regval >> 16) & 0x1FF; + } + } +} + +static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, + struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) +{ + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + int tl2, tl2_schq; + u64 regoff; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + /* loop through all TL2s with matching PF_FUNC */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq; + for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { + /* skip the smq(flush) TL2 */ + if (tl2 == tl2_schq) + continue; + /* skip unused TL2s */ + if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) + continue; + /* skip if PF_FUNC doesn't match */ + if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != + (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] & + ~RVU_PFVF_FUNC_MASK))) + continue; + /* enable/disable XOFF */ + regoff = NIX_AF_TL2X_SW_XOFF(tl2); + if (enable) + rvu_write64(rvu, blkaddr, regoff, 0x1); + else + rvu_write64(rvu, blkaddr, regoff, 0x0); + } +} + +static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr, + struct nix_smq_flush_ctx *smq_flush_ctx, bool enable) +{ + u64 cir_off, pir_off, cir_val, pir_val; + struct nix_smq_tree_ctx *smq_tree_ctx; + int lvl; + + for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { + smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; + cir_off = smq_tree_ctx->cir_off; + cir_val = smq_tree_ctx->cir_val; + pir_off = smq_tree_ctx->pir_off; + pir_val = smq_tree_ctx->pir_val; + + if (enable) { + rvu_write64(rvu, blkaddr, cir_off, cir_val); + if (lvl != NIX_TXSCH_LVL_TL1) + rvu_write64(rvu, blkaddr, pir_off, pir_val); + } else { + rvu_write64(rvu, blkaddr, cir_off, 0x0); + if (lvl != NIX_TXSCH_LVL_TL1) + rvu_write64(rvu, 
blkaddr, pir_off, 0x0); + } + } +} + +static int nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) +{ + struct nix_smq_flush_ctx *smq_flush_ctx; + int err, restore_tx_en = 0, i; + int pf = rvu_get_pf(rvu->pdev, pcifunc); + u8 cgx_id = 0, lmac_id = 0; + u16 tl2_tl3_link_schq; + u8 link, link_level; + u64 cfg, bmap = 0; + + if (!is_rvu_otx2(rvu)) { + /* Skip SMQ flush if pkt count is zero */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq)); + if (!cfg) + return 0; + } + + /* enable cgx tx if disabled */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */ + smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL); + if (!smq_flush_ctx) + return -ENOMEM; + nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx); + nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); + nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); + + /* Disable backpressure from physical link, + * otherwise SMQ flush may stall. + */ + rvu_cgx_enadis_rx_bp(rvu, pf, false); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq; + link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq; + + /* SMQ set enqueue xoff */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + cfg |= BIT_ULL(50); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + + /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */ + for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); + if (!(cfg & BIT_ULL(12))) + continue; + bmap |= BIT_ULL(i); + cfg &= ~BIT_ULL(12); + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); + } + + /* Do SMQ flush and set enqueue xoff */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + + /* Wait for flush to complete */ + err = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); + if (err) + dev_info(rvu->dev, + "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", + nixlf, smq); + + /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ + for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { + if (!(bmap & BIT_ULL(i))) + continue; + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); + cfg |= BIT_ULL(12); + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); + } + + /* clear XOFF on TL2s */ + nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); + nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); + kfree(smq_flush_ctx); + + rvu_cgx_enadis_rx_bp(rvu, pf, true); + /* restore cgx tx state */ + if (restore_tx_en) + rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + return err; +} + static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) { int blkaddr, nixlf, lvl, schq, err; struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; - u64 cfg; + u16 map_func; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1250,66 +2539,72 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) nix_hw = 
get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; - /* Disable TL2/3 queue links before SMQ flush*/ + /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ mutex_lock(&rvu->rsrc_lock); - for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) + for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; - txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } } + nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, + nix_get_tx_link(rvu, pcifunc)); + + /* On PF cleanup, clear cfg done flag as + * PF would have changed default config. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; + schq = nix_get_tx_link(rvu, pcifunc); + /* Do not clear pcifunc in txsch->pfvf_map[schq] because + * VF might be using this TL1 queue + */ + map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); + txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); + } /* Flush SMQs */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - - /* Wait for flush to complete */ - err = rvu_poll_reg(rvu, blkaddr, - NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); - if (err) { - dev_err(rvu->dev, - "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); - } + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); } /* Now free scheduler queues to free pool */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - /* Free all SCHQ's except TL1 as - * TL1 is shared across all VF's for a RVU PF - */ - if (lvl == NIX_TXSCH_LVL_TL1) + /* TLs above aggregation level are shared across all PF + * and it's VFs, hence skip freeing them. 
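Editor's note: a small sketch of the pfvf_map bookkeeping used throughout this hunk. Each scheduler queue's u32 entry packs the owning PF_FUNC together with state flags such as NIX_TXSCHQ_FREE and NIX_TXSCHQ_CFG_DONE; freed queues are written back as "owner 0 + FREE". The 16/16 bit split, macro bodies and flag values below are assumptions for illustration; the real TXSCH_MAP macros live in the driver headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: owner PF_FUNC in the upper half, flags in the lower half */
#define MAP(func, flags)	(((uint32_t)(func) << 16) | ((flags) & 0xFFFF))
#define MAP_FUNC(m)		((m) >> 16)
#define MAP_FLAGS(m)		((m) & 0xFFFF)
#define SET_FLAG(m, f)		((m) | ((f) & 0xFFFF))

#define FLAG_FREE	0x1	/* illustrative flag values */
#define FLAG_CFG_DONE	0x2

int main(void)
{
	uint32_t map = MAP(0x1234, 0);		/* queue attached to PF_FUNC 0x1234 */

	map = SET_FLAG(map, FLAG_CFG_DONE);	/* PF finished default config */
	printf("owner %#x flags %#x\n", (unsigned)MAP_FUNC(map), (unsigned)MAP_FLAGS(map));

	map = MAP(0, FLAG_FREE);		/* queue returned to the free pool */
	printf("owner %#x flags %#x\n", (unsigned)MAP_FUNC(map), (unsigned)MAP_FLAGS(map));
	return 0;
}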
+ */ + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; + nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); rvu_free_rsrc(&txsch->schq, schq); - txsch->pfvf_map[schq] = 0; + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } } mutex_unlock(&rvu->rsrc_lock); - /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ - rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); - err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); + err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); if (err) dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); @@ -1319,13 +2614,13 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch_free_req *req) { - int lvl, schq, nixlf, blkaddr, rc; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; + int lvl, schq, nixlf, blkaddr; struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; - u64 cfg; + int rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1333,7 +2628,7 @@ static int nix_txschq_free_one(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) @@ -1343,44 +2638,45 @@ static int nix_txschq_free_one(struct rvu *rvu, schq = req->schq; txsch = &nix_hw->txsch[lvl]; - /* Don't allow freeing TL1 */ - if (lvl > NIX_TXSCH_LVL_TL2 || - schq >= txsch->schq.max) - goto err; + if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max) + return 0; pfvf_map = txsch->pfvf_map; mutex_lock(&rvu->rsrc_lock); if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { - mutex_unlock(&rvu->rsrc_lock); + rc = NIX_AF_ERR_TLX_INVALID; goto err; } + /* Clear SW_XOFF of this resource only. + * For SMQ level, all path XOFF's + * need to be made clear by user + */ + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); + + nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); + /* Flush if it is a SMQ. 
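Editor's note: a minimal sketch of the SMQ flush handshake that nix_smq_flush() performs on NIX_AF_SMQX_CFG, as seen in the hunk above: set bit 50 (enqueue XOFF) and bit 49 (flush), then poll until hardware clears bit 49. The register stubs, immediate "hardware" completion and retry count below are illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

#define SMQ_FLUSH	(1ULL << 49)	/* flush request, cleared by HW when done */
#define SMQ_ENQ_XOFF	(1ULL << 50)	/* stop new enqueues while flushing */

static uint64_t smq_cfg;		/* stand-in for NIX_AF_SMQX_CFG */

static uint64_t reg_read(void)
{
	return smq_cfg;
}

static void reg_write(uint64_t v)
{
	smq_cfg = v;
	smq_cfg &= ~SMQ_FLUSH;		/* pretend HW finishes the flush at once */
}

int main(void)
{
	int retries = 1000;

	reg_write(reg_read() | SMQ_ENQ_XOFF | SMQ_FLUSH);
	while ((reg_read() & SMQ_FLUSH) && --retries)
		;			/* the driver uses rvu_poll_reg() here */
	printf(retries ? "flush done\n" : "flush timed out\n");
	return 0;
}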
Onus of disabling * TL2/3 queue links before SMQ flush is on user */ - if (lvl == NIX_TXSCH_LVL_SMQ) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - - /* Wait for flush to complete */ - rc = rvu_poll_reg(rvu, blkaddr, - NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); - if (rc) { - dev_err(rvu->dev, - "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); - } + if (lvl == NIX_TXSCH_LVL_SMQ && + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { + rc = NIX_AF_SMQ_FLUSH_FAILED; + goto err; } + nix_reset_tx_schedule(rvu, blkaddr, lvl, schq); + /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); - txsch->pfvf_map[schq] = 0; + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); mutex_unlock(&rvu->rsrc_lock); return 0; err: - return NIX_AF_ERR_TLX_INVALID; + mutex_unlock(&rvu->rsrc_lock); + return rc; } int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, @@ -1393,8 +2689,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, return nix_txschq_free_one(rvu, req); } -static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, - int lvl, u64 reg, u64 regval) +static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, + int lvl, u64 reg, u64 regval) { u64 regbase = reg & 0xFFFF; u16 schq, parent; @@ -1431,111 +2727,196 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, return true; } -static int -nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc) +static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) { - u16 schq_list[2], schq_cnt, schq; - int blkaddr, idx, err = 0; - u16 map_func, map_flags; - struct nix_hw *nix_hw; - u64 reg, regval; - u32 *pfvf_map; + u64 regbase; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + if (hw->cap.nix_shaping) + return true; - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return -EINVAL; + /* If shaping and coloring is not supported, then + * *_CIR and *_PIR registers should not be configured. + */ + regbase = reg & 0xFFFF; + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + if (regbase == NIX_AF_TL1X_CIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL2: + if (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL3: + if (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_TL4: + if (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)) + return false; + break; + case NIX_TXSCH_LVL_MDQ: + if (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)) + return false; + break; + } + return true; +} + +static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, + u16 pcifunc, int blkaddr) +{ + u32 *pfvf_map; + int schq; + + schq = nix_get_tx_link(rvu, pcifunc); pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; + /* Skip if PF has already done the config */ + if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE) + return; + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), + (TXSCH_TL1_DFLT_RR_PRIO << 1)); - mutex_lock(&rvu->rsrc_lock); + /* On OcteonTx2 the config was in bytes and newer silcons + * it's changed to weight. 
+ */ + if (!rvu->hw->cap.nix_common_dwrr_mtu) + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + TXSCH_TL1_DFLT_RR_QTM); + else + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + CN10K_MAX_DWRR_WEIGHT); - err = rvu_get_tl1_schqs(rvu, blkaddr, - pcifunc, schq_list, &schq_cnt); - if (err) - goto unlock; + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); +} - for (idx = 0; idx < schq_cnt; idx++) { - schq = schq_list[idx]; - map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); - map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); +/* Register offset - [15:0] + * Scheduler Queue number - [25:16] + */ +#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) - /* check if config is already done or this is pf */ - if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE) - continue; +static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, struct nix_txschq_config *req, + struct nix_txschq_config *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int idx, schq; + u64 reg; - /* default configuration */ - reg = NIX_AF_TL1X_TOPOLOGY(schq); - regval = (TXSCH_TL1_DFLT_RR_PRIO << 1); - rvu_write64(rvu, blkaddr, reg, regval); - reg = NIX_AF_TL1X_SCHEDULE(schq); - regval = TXSCH_TL1_DFLT_RR_QTM; - rvu_write64(rvu, blkaddr, reg, regval); - reg = NIX_AF_TL1X_CIR(schq); - regval = 0; - rvu_write64(rvu, blkaddr, reg, regval); + for (idx = 0; idx < req->num_regs; idx++) { + reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || + !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) + return NIX_AF_INVAL_TXSCHQ_CFG; + rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); + } + rsp->lvl = req->lvl; + rsp->num_regs = req->num_regs; + return 0; +} - map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; - pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); +void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, + struct nix_txsch *txsch, bool enable) +{ + struct rvu_hwinfo *hw = rvu->hw; + int lbk_link_start, lbk_links; + u8 pf = rvu_get_pf(rvu->pdev, pcifunc); + int schq; + u64 cfg; + + if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc)) + return; + + cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; + lbk_link_start = hw->cgx_links; + + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + /* Enable all LBK links with channel 63 by default so that + * packets can be sent to LBK with a NPC TX MCAM rule + */ + lbk_links = hw->lbk_links; + while (lbk_links--) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, + lbk_link_start + + lbk_links), cfg); } -unlock: - mutex_unlock(&rvu->rsrc_lock); - return err; } int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, - struct msg_rsp *rsp) + struct nix_txschq_config *rsp) { - u16 schq, pcifunc = req->hdr.pcifunc; + u64 reg, val, regval, schq_regbase, val_mask; struct rvu_hwinfo *hw = rvu->hw; - u64 reg, regval, schq_regbase; + u16 pcifunc = req->hdr.pcifunc; struct nix_txsch *txsch; - u16 map_func, map_flags; struct nix_hw *nix_hw; int blkaddr, idx, err; + int nixlf, schq; u32 *pfvf_map; - int nixlf; if (req->lvl >= NIX_TXSCH_LVL_CNT || req->num_regs > MAX_REGS_PER_MBOX_MSG) return NIX_AF_INVAL_TXSCHQ_CFG; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + if (req->read) + return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; - /* VF is only allowed to trigger - * setting default cfg on TL1 - */ - if (pcifunc & RVU_PFVF_FUNC_MASK && - req->lvl == NIX_TXSCH_LVL_TL1) { - return nix_tl1_default_cfg(rvu, pcifunc); + if (req->lvl >= hw->cap.nix_tx_aggr_lvl && + pcifunc & RVU_PFVF_FUNC_MASK) { + mutex_lock(&rvu->rsrc_lock); + if (req->lvl == NIX_TXSCH_LVL_TL1) + nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr); + mutex_unlock(&rvu->rsrc_lock); + return 0; } for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; + val_mask = req->regval_mask[idx]; - if (!is_txschq_config_valid(rvu, pcifunc, blkaddr, - txsch->lvl, reg, regval)) + if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, + txsch->lvl, reg, regval)) return NIX_AF_INVAL_TXSCHQ_CFG; + /* Check if shaping and coloring is supported */ + if (!is_txschq_shaping_valid(hw, req->lvl, reg)) + continue; + + val = rvu_read64(rvu, blkaddr, reg); + regval = (val & val_mask) | (regval & ~val_mask); + + /* Handle shaping state toggle specially */ + if (hw->cap.nix_shaper_toggle_wait && + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + req->lvl, reg, regval)) + continue; + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], @@ -1544,32 +2925,36 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, regval |= ((u64)nixlf << 24); } + /* Clear 'BP_ENA' config, if it's not allowed */ + if (!hw->cap.nix_tx_link_bp) { + if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) || + (schq_regbase & 0xFF00) == + NIX_AF_TL3_TL2X_LINKX_CFG(0, 0)) + regval &= ~BIT_ULL(13); + } + /* Mark config as done for TL1 by PF */ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); - mutex_lock(&rvu->rsrc_lock); 
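Editor's note: a tiny sketch of the regval_mask merge performed earlier in this handler (regval = (val & val_mask) | (regval & ~val_mask)). Bits set in the mask keep the value already in hardware and only the cleared bits are taken from the caller's request. The constants in main() are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Merge a requested value into the current register value under a mask:
 * mask bit = 1 -> keep the existing hardware bit
 * mask bit = 0 -> take the bit from the request
 */
static uint64_t merge(uint64_t hw_val, uint64_t req_val, uint64_t mask)
{
	return (hw_val & mask) | (req_val & ~mask);
}

int main(void)
{
	uint64_t hw = 0xFF00, req = 0x00AA, mask = 0xFF00;

	printf("%#llx\n", (unsigned long long)merge(hw, req, mask));	/* 0xffaa */
	return 0;
}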
- - map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); - map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); - - map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; - pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); + pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], + NIX_TXSCHQ_CFG_DONE); mutex_unlock(&rvu->rsrc_lock); } - rvu_write64(rvu, blkaddr, reg, regval); - - /* Check for SMQ flush, if so, poll for its completion */ + /* SMQ flush is special hence split register writes such + * that flush first and write rest of the bits later. + */ if (schq_regbase == NIX_AF_SMQX_CFG(0) && (regval & BIT_ULL(49))) { - err = rvu_poll_reg(rvu, blkaddr, - reg, BIT_ULL(49), true); - if (err) - return NIX_AF_SMQ_FLUSH_FAILED; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + regval &= ~BIT_ULL(49); } + rvu_write64(rvu, blkaddr, reg, regval); } + return 0; } @@ -1578,9 +2963,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, { u64 regval = req->vtag_size; - if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) + if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || + req->vtag_size > VTAGSIZE_T8) return -EINVAL; + /* RX VTAG Type 7 reserved for vf vlan */ + if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) + return NIX_AF_ERR_RX_VTAG_INUSE; + if (req->rx.capture_vtag) regval |= BIT_ULL(5); if (req->rx.strip_vtag) @@ -1591,36 +2981,201 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, return 0; } +static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, + u16 pcifunc, int index) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan; + + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; + if (vlan->entry2pfvf_map[index] != pcifunc) + return NIX_AF_ERR_PARAM; + + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); + + vlan->entry2pfvf_map[index] = 0; + rvu_free_rsrc(&vlan->rsrc, index); + + return 0; +} + +static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) +{ + struct nix_txvlan *vlan; + struct nix_hw *nix_hw; + int index, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + vlan = &nix_hw->txvlan; + + mutex_lock(&vlan->rsrc_lock); + /* Scan all the entries and free the ones mapped to 'pcifunc' */ + for (index = 0; index < vlan->rsrc.max; index++) { + if (vlan->entry2pfvf_map[index] == pcifunc) + nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); + } + mutex_unlock(&vlan->rsrc_lock); +} + +static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, + u64 vtag, u8 size) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan; + u64 regval; + int index; + + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; + + mutex_lock(&vlan->rsrc_lock); + + index = rvu_alloc_rsrc(&vlan->rsrc); + if (index < 0) { + mutex_unlock(&vlan->rsrc_lock); + return index; + } + + mutex_unlock(&vlan->rsrc_lock); + + regval = size ? 
vtag : vtag << 32; + + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_DATA(index), regval); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_CTL(index), size); + + return index; +} + +static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, + struct nix_vtag_config *req) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + u16 pcifunc = req->hdr.pcifunc; + int idx0 = req->tx.vtag0_idx; + int idx1 = req->tx.vtag1_idx; + struct nix_txvlan *vlan; + int err = 0; + + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; + if (req->tx.free_vtag0 && req->tx.free_vtag1) + if (vlan->entry2pfvf_map[idx0] != pcifunc || + vlan->entry2pfvf_map[idx1] != pcifunc) + return NIX_AF_ERR_PARAM; + + mutex_lock(&vlan->rsrc_lock); + + if (req->tx.free_vtag0) { + err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); + if (err) + goto exit; + } + + if (req->tx.free_vtag1) + err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); + +exit: + mutex_unlock(&vlan->rsrc_lock); + return err; +} + +static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, + struct nix_vtag_config *req, + struct nix_vtag_config_rsp *rsp) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan; + u16 pcifunc = req->hdr.pcifunc; + + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; + if (req->tx.cfg_vtag0) { + rsp->vtag0_idx = + nix_tx_vtag_alloc(rvu, blkaddr, + req->tx.vtag0, req->vtag_size); + + if (rsp->vtag0_idx < 0) + return NIX_AF_ERR_TX_VTAG_NOSPC; + + vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; + } + + if (req->tx.cfg_vtag1) { + rsp->vtag1_idx = + nix_tx_vtag_alloc(rvu, blkaddr, + req->tx.vtag1, req->vtag_size); + + if (rsp->vtag1_idx < 0) + goto err_free; + + vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; + } + + return 0; + +err_free: + if (req->tx.cfg_vtag0) + nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); + + return NIX_AF_ERR_TX_VTAG_NOSPC; +} + int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, - struct msg_rsp *rsp) + struct nix_vtag_config_rsp *rsp) { - struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int blkaddr, nixlf, err; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; if (req->cfg_type) { + /* rx vtag configuration */ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); if (err) return NIX_AF_ERR_PARAM; } else { - /* TODO: handle tx vtag configuration */ - return 0; + /* tx vtag configuration */ + if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && + (req->tx.free_vtag0 || req->tx.free_vtag1)) + return NIX_AF_ERR_PARAM; + + if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) + return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); + + if (req->tx.free_vtag0 || req->tx.free_vtag1) + return nix_tx_vtag_decfg(rvu, blkaddr, req); } return 0; } -static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, - u16 pcifunc, int next, bool eol) +static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, + int mce, u8 op, u16 pcifunc, int next, + int index, u8 mce_op, bool eol) { struct nix_aq_enq_req aq_req; int err; @@ -1630,9 +3185,9 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, aq_req.op = op; aq_req.qidx = mce; - /* Forward bcast pkts to RQ0, RSS not needed */ - aq_req.mce.op = 0; - aq_req.mce.index = 0; + /* Use RSS with RSS index 0 
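Editor's note: a sketch of how a software MCE list is laid out as a chain of hardware replication entries, as done by the MCE update helpers in this file: entries occupy consecutive indices, each carries the index of the next one, and EOL is set only on the last. The printf stands in for the AQ write / NIX_AF_TX_MCASTX programming; the struct, base index and PF_FUNC values are illustrative.

#include <stdint.h>
#include <stdio.h>

struct hw_mce {
	uint16_t pcifunc;
	int next;
	int eol;
};

int main(void)
{
	uint16_t members[] = { 0x0400, 0x0401, 0x0402 };	/* a PF and two of its VFs */
	int start = 32, count = 3;				/* assumed base index of the list */
	struct hw_mce ent;

	for (int i = 0; i < count; i++) {
		ent.pcifunc = members[i];
		ent.next = start + i + 1;
		ent.eol = (i == count - 1);			/* EOL only on the last entry */
		printf("MCE[%d]: pcifunc %#x next %d eol %d\n",
		       start + i, (unsigned)ent.pcifunc, ent.next, ent.eol);
	}
	return 0;
}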
*/ + aq_req.mce.op = mce_op; + aq_req.mce.index = index; aq_req.mce.eol = eol; aq_req.mce.pf_func = pcifunc; aq_req.mce.next = next; @@ -1640,17 +3195,218 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, /* All fields valid */ *(u64 *)(&aq_req.mce_mask) = ~0ULL; - err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); if (err) { dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return err; } return 0; } -static int nix_update_mce_list(struct nix_mce_list *mce_list, - u16 pcifunc, int idx, bool add) +static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list) +{ + struct hlist_node *tmp; + struct mce *mce; + + /* Scan through the current list */ + hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { + hlist_del(&mce->node); + kfree(mce); + } + + mce_list->count = 0; + mce_list->max = 0; +} + +static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem) +{ + return elem->mce_start_index + elem->mcast_mce_list.count - 1; +} + +static int nix_update_ingress_mce_list_hw(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem) +{ + int idx, last_idx, next_idx, err; + struct nix_mce_list *mce_list; + struct mce *mce, *prev_mce; + + mce_list = &elem->mcast_mce_list; + idx = elem->mce_start_index; + last_idx = nix_get_last_mce_list_index(elem); + hlist_for_each_entry(mce, &mce_list->head, node) { + if (idx > last_idx) + break; + + if (!mce->is_active) { + if (idx == elem->mce_start_index) { + idx++; + prev_mce = mce; + elem->mce_start_index = idx; + continue; + } else if (idx == last_idx) { + err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE, + prev_mce->pcifunc, next_idx, + prev_mce->rq_rss_index, + prev_mce->dest_type, + false); + if (err) + return err; + + break; + } + } + + next_idx = idx + 1; + /* EOL should be set in last MCE */ + err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, + mce->pcifunc, next_idx, + mce->rq_rss_index, mce->dest_type, + (next_idx > last_idx) ? 
true : false); + if (err) + return err; + + idx++; + prev_mce = mce; + } + + return 0; +} + +static void nix_update_egress_mce_list_hw(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem) +{ + struct nix_mce_list *mce_list; + int idx, last_idx, next_idx; + struct mce *mce, *prev_mce; + u64 regval; + u8 eol; + + mce_list = &elem->mcast_mce_list; + idx = elem->mce_start_index; + last_idx = nix_get_last_mce_list_index(elem); + hlist_for_each_entry(mce, &mce_list->head, node) { + if (idx > last_idx) + break; + + if (!mce->is_active) { + if (idx == elem->mce_start_index) { + idx++; + prev_mce = mce; + elem->mce_start_index = idx; + continue; + } else if (idx == last_idx) { + regval = (next_idx << 16) | (1 << 12) | prev_mce->channel; + rvu_write64(rvu, nix_hw->blkaddr, + NIX_AF_TX_MCASTX(idx - 1), + regval); + break; + } + } + + eol = 0; + next_idx = idx + 1; + /* EOL should be set in last MCE */ + if (next_idx > last_idx) + eol = 1; + + regval = (next_idx << 16) | (eol << 12) | mce->channel; + rvu_write64(rvu, nix_hw->blkaddr, + NIX_AF_TX_MCASTX(idx), + regval); + idx++; + prev_mce = mce; + } +} + +static int nix_del_mce_list_entry(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem, + struct nix_mcast_grp_update_req *req) +{ + u32 num_entry = req->num_mce_entry; + struct nix_mce_list *mce_list; + struct mce *mce; + bool is_found; + int i; + + mce_list = &elem->mcast_mce_list; + for (i = 0; i < num_entry; i++) { + is_found = false; + hlist_for_each_entry(mce, &mce_list->head, node) { + /* If already exists, then delete */ + if (mce->pcifunc == req->pcifunc[i]) { + hlist_del(&mce->node); + kfree(mce); + mce_list->count--; + is_found = true; + break; + } + } + + if (!is_found) + return NIX_AF_ERR_INVALID_MCAST_DEL_REQ; + } + + mce_list->max = mce_list->count; + /* Dump the updated list to HW */ + if (elem->dir == NIX_MCAST_INGRESS) + return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); + + nix_update_egress_mce_list_hw(rvu, nix_hw, elem); + return 0; +} + +static int nix_add_mce_list_entry(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_mcast_grp_elem *elem, + struct nix_mcast_grp_update_req *req) +{ + u32 num_entry = req->num_mce_entry; + struct nix_mce_list *mce_list; + struct hlist_node *tmp; + struct mce *mce; + int i; + + mce_list = &elem->mcast_mce_list; + for (i = 0; i < num_entry; i++) { + mce = kzalloc(sizeof(*mce), GFP_KERNEL); + if (!mce) + goto free_mce; + + mce->pcifunc = req->pcifunc[i]; + mce->channel = req->channel[i]; + mce->rq_rss_index = req->rq_rss_index[i]; + mce->dest_type = req->dest_type[i]; + mce->is_active = 1; + hlist_add_head(&mce->node, &mce_list->head); + mce_list->count++; + } + + mce_list->max += num_entry; + + /* Dump the updated list to HW */ + if (elem->dir == NIX_MCAST_INGRESS) + return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); + + nix_update_egress_mce_list_hw(rvu, nix_hw, elem); + return 0; + +free_mce: + hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { + hlist_del(&mce->node); + kfree(mce); + mce_list->count--; + } + + return -ENOMEM; +} + +static int nix_update_mce_list_entry(struct nix_mce_list *mce_list, + u16 pcifunc, bool add) { struct mce *mce, *tail = NULL; bool delete = false; @@ -1661,6 +3417,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, if (mce->pcifunc == pcifunc && !add) { delete = true; break; + } else if (mce->pcifunc == pcifunc && add) { + /* entry already exists */ + return 0; } tail = mce; } @@ -1679,7 +3438,6 @@ static int 
nix_update_mce_list(struct nix_mce_list *mce_list, mce = kzalloc(sizeof(*mce), GFP_KERNEL); if (!mce) return -ENOMEM; - mce->idx = idx; mce->pcifunc = pcifunc; if (!tail) hlist_add_head(&mce->node, &mce_list->head); @@ -1689,70 +3447,64 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, return 0; } -static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) +int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, + struct nix_mce_list *mce_list, + int mce_idx, int mcam_index, bool add) { - int err = 0, idx, next_idx, count; - struct nix_mce_list *mce_list; - struct mce *mce, *next_mce; + int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr; + struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_mcast *mcast; struct nix_hw *nix_hw; - struct rvu_pfvf *pfvf; - int blkaddr; + struct mce *mce; - /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ - if (is_afvf(pcifunc)) - return 0; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return 0; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return 0; - - mcast = &nix_hw->mcast; + if (!mce_list) + return -EINVAL; /* Get this PF/VF func's MCE index */ - pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); - idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); + idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK); - mce_list = &pfvf->bcast_mce_list; - if (idx > (pfvf->bcast_mce_idx + mce_list->max)) { + if (idx > (mce_idx + mce_list->max)) { dev_err(rvu->dev, "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", __func__, idx, mce_list->max, - pcifunc >> RVU_PFVF_PF_SHIFT); + rvu_get_pf(rvu->pdev, pcifunc)); return -EINVAL; } + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast = &nix_hw->mcast; mutex_lock(&mcast->mce_lock); - err = nix_update_mce_list(mce_list, pcifunc, idx, add); + err = nix_update_mce_list_entry(mce_list, pcifunc, add); if (err) goto end; /* Disable MCAM entry in NPC */ - - if (!mce_list->count) + if (!mce_list->count) { + npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false); goto end; - count = mce_list->count; + } /* Dump the updated list to HW */ + idx = mce_idx; + last_idx = idx + mce_list->count - 1; hlist_for_each_entry(mce, &mce_list->head, node) { - next_idx = 0; - count--; - if (count) { - next_mce = hlist_entry(mce->node.next, - struct mce, node); - next_idx = next_mce->idx; - } + if (idx > last_idx) + break; + + next_idx = idx + 1; /* EOL should be set in last MCE */ - err = nix_setup_mce(rvu, mce->idx, - NIX_AQ_INSTOP_WRITE, mce->pcifunc, - next_idx, count ? false : true); + err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, + mce->pcifunc, next_idx, + 0, 1, + (next_idx > last_idx) ? 
true : false); if (err) goto end; + idx++; } end: @@ -1760,7 +3512,87 @@ end: return err; } -static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) +void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, + struct nix_mce_list **mce_list, int *mce_idx) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_pfvf *pfvf; + + if (!hw->cap.nix_rx_multicast || + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, + pcifunc & ~RVU_PFVF_FUNC_MASK))) { + *mce_list = NULL; + *mce_idx = 0; + return; + } + + /* Get this PF/VF func's MCE index */ + pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + + if (type == NIXLF_BCAST_ENTRY) { + *mce_list = &pfvf->bcast_mce_list; + *mce_idx = pfvf->bcast_mce_idx; + } else if (type == NIXLF_ALLMULTI_ENTRY) { + *mce_list = &pfvf->mcast_mce_list; + *mce_idx = pfvf->mcast_mce_idx; + } else if (type == NIXLF_PROMISC_ENTRY) { + *mce_list = &pfvf->promisc_mce_list; + *mce_idx = pfvf->promisc_mce_idx; + } else { + *mce_list = NULL; + *mce_idx = 0; + } +} + +static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, + int type, bool add) +{ + int err = 0, nixlf, blkaddr, mcam_index, mce_idx; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + struct nix_mce_list *mce_list; + int pf; + + /* skip multicast pkt replication for AF's VFs & SDP links */ + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc)) + return 0; + + if (!hw->cap.nix_rx_multicast) + return 0; + + pf = rvu_get_pf(rvu->pdev, pcifunc); + if (!is_pf_cgxmapped(rvu, pf)) + return 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return -EINVAL; + + nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx); + + mcam_index = npc_get_nixlf_mcam_index(mcam, + pcifunc & ~RVU_PFVF_FUNC_MASK, + nixlf, type); + err = nix_update_mce_list(rvu, pcifunc, mce_list, + mce_idx, mcam_index, add); + return err; +} + +static void nix_setup_mcast_grp(struct nix_hw *nix_hw) +{ + struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp; + + INIT_LIST_HEAD(&mcast_grp->mcast_grp_head); + mutex_init(&mcast_grp->mcast_grp_lock); + mcast_grp->next_grp_index = 1; + mcast_grp->count = 0; +} + +static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_mcast *mcast = &nix_hw->mcast; int err, pf, numvfs, idx; @@ -1778,23 +3610,52 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) numvfs = (cfg >> 12) & 0xFF; pfvf = &rvu->pf[pf]; - /* Save the start MCE */ - pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); + /* This NIX0/1 block mapped to PF ? 
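Editor's note: a sketch of the per-PF slot arithmetic used by the bcast/mcast/promisc MCE lists set up below. Each PF reserves numvfs + 1 consecutive entries (index 0 for the PF itself, then its VFs), and a given PF_FUNC's slot is the saved base index plus the function bits of pcifunc. The mask value and example pcifuncs are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define FUNC_MASK 0x3FF	/* assumed RVU_PFVF_FUNC_MASK-style low bits: 0 = PF, 1..n = VFs */

/* slot 0 of the range belongs to the PF, slot k to VF k-1 */
static int mce_slot(int base, uint16_t pcifunc)
{
	return base + (pcifunc & FUNC_MASK);
}

int main(void)
{
	int bcast_base = 16;	/* start index saved when the list was carved out */

	printf("PF    -> MCE %d\n", mce_slot(bcast_base, 0x0400));	/* 16 */
	printf("VF0   -> MCE %d\n", mce_slot(bcast_base, 0x0401));	/* 17 */
	printf("VF127 -> MCE %d\n", mce_slot(bcast_base, 0x0480));	/* 16 + 128 = 144 */
	return 0;
}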
*/ + if (pfvf->nix_blkaddr != nix_hw->blkaddr) + continue; + + /* save start idx of broadcast mce list */ + pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1); + /* save start idx of multicast mce list */ + pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); + nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1); + + /* save the start idx of promisc mce list */ + pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS); + nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1); + for (idx = 0; idx < (numvfs + 1); idx++) { /* idx-0 is for PF, followed by VFs */ - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pcifunc |= idx; /* Add dummy entries now, so that we don't have to check * for whether AQ_OP should be INIT/WRITE later on. * Will be updated when a NIXLF is attached/detached to * these PF/VFs. */ - err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx, - NIX_AQ_INSTOP_INIT, - pcifunc, 0, true); + err = nix_blk_setup_mce(rvu, nix_hw, + pfvf->bcast_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, 0, 1, true); + if (err) + return err; + + /* add dummy entries to multicast mce list */ + err = nix_blk_setup_mce(rvu, nix_hw, + pfvf->mcast_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, 0, 1, true); + if (err) + return err; + + /* add dummy entries to promisc mce list */ + err = nix_blk_setup_mce(rvu, nix_hw, + pfvf->promisc_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, 0, 1, true); if (err) return err; } @@ -1809,13 +3670,30 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) int err, size; size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F; - size = (1ULL << size); + size = BIT_ULL(size); + + /* Allocate bitmap for rx mce entries */ + mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE; + err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + if (err) + return -ENOMEM; + + /* Allocate bitmap for tx mce entries */ + mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX; + err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); + if (err) { + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + return -ENOMEM; + } /* Alloc memory for multicast/mirror replication entries */ err = qmem_alloc(rvu->dev, &mcast->mce_ctx, - (256UL << MC_TBL_SIZE), size); - if (err) + mcast->mce_counter[NIX_MCAST_INGRESS].max, size); + if (err) { + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); return -ENOMEM; + } rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE, (u64)mcast->mce_ctx->iova); @@ -1828,8 +3706,11 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF; err = qmem_alloc(rvu->dev, &mcast->mcast_buf, (8UL << MC_BUF_CNT), size); - if (err) + if (err) { + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]); + rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]); return -ENOMEM; + } rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE, (u64)mcast->mcast_buf->iova); @@ -1843,14 +3724,41 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) mutex_init(&mcast->mce_lock); - return nix_setup_bcast_tables(rvu, nix_hw); + nix_setup_mcast_grp(nix_hw); + + return nix_setup_mce_tables(rvu, nix_hw); +} + +static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) +{ + 
struct nix_txvlan *vlan = &nix_hw->txvlan; + int err; + + /* Allocate resource bimap for tx vtag def registers*/ + vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; + err = rvu_alloc_bitmap(&vlan->rsrc); + if (err) + return -ENOMEM; + + /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ + vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, + sizeof(u16), GFP_KERNEL); + if (!vlan->entry2pfvf_map) + goto free_mem; + + mutex_init(&vlan->rsrc_lock); + return 0; + +free_mem: + kfree(vlan->rsrc.bmap); + return -ENOMEM; } static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { struct nix_txsch *txsch; + int err, lvl, schq; u64 cfg, reg; - int err, lvl; /* Get scheduler queue count of each type and alloc * bitmap for each for alloc/free/attach operations. @@ -1888,8 +3796,24 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) sizeof(u32), GFP_KERNEL); if (!txsch->pfvf_map) return -ENOMEM; - memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32)); + for (schq = 0; schq < txsch->schq.max; schq++) + txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); + } + + /* Setup a default value of 8192 as DWRR MTU */ + if (rvu->hw->cap.nix_common_dwrr_mtu || + rvu->hw->cap.nix_multiple_dwrr_mtu) { + rvu_write64(rvu, blkaddr, + nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), + convert_bytes_to_dwrr_mtu(8192)); + rvu_write64(rvu, blkaddr, + nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK), + convert_bytes_to_dwrr_mtu(8192)); + rvu_write64(rvu, blkaddr, + nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP), + convert_bytes_to_dwrr_mtu(8192)); } + return 0; } @@ -1944,21 +3868,81 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } -int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp) +static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) +{ + /* CN10K supports LBK FIFO size 72 KB */ + if (rvu->hw->lbk_bufsize == 0x12000) + *max_mtu = CN10K_LBK_LINK_MAX_FRS; + else + *max_mtu = NIC_HW_MAX_FRS; +} + +static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) +{ + int fifo_size = rvu_cgx_get_fifolen(rvu); + + /* RPM supports FIFO len 128 KB and RPM2 supports double the + * FIFO len to accommodate 8 LMACS + */ + if (fifo_size == 0x20000 || fifo_size == 0x40000) + *max_mtu = CN10K_LMAC_LINK_MAX_FRS; + else + *max_mtu = NIC_HW_MAX_FRS; +} + +int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, + struct nix_hw_info *rsp) { - struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - int i, nixlf, blkaddr; - u64 stats; + u64 dwrr_mtu; + int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + if (is_lbk_vf(rvu, pcifunc)) + rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); + else + rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); + + rsp->min_mtu = NIC_HW_MIN_FRS; + + if (!rvu->hw->cap.nix_common_dwrr_mtu && + !rvu->hw->cap.nix_multiple_dwrr_mtu) { + /* Return '1' on OTx2 */ + rsp->rpm_dwrr_mtu = 1; + rsp->sdp_dwrr_mtu = 1; + rsp->lbk_dwrr_mtu = 1; + return 0; + } + + /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */ + dwrr_mtu = rvu_read64(rvu, blkaddr, + nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM)); + rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + dwrr_mtu = rvu_read64(rvu, blkaddr, + nix_get_dwrr_mtu_reg(rvu->hw, 
SMQ_LINK_TYPE_SDP)); + rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + dwrr_mtu = rvu_read64(rvu, blkaddr, + nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK)); + rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + return 0; +} + +int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int i, nixlf, blkaddr, err; + u64 stats; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; /* Get stats count supported by HW */ stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); @@ -1987,6 +3971,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) return -ERANGE; } +/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ +#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) +/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ +#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) + static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) { int idx, nr_field, key_off, field_marker, keyoff_marker; @@ -1994,6 +3983,8 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) struct nix_rx_flowkey_alg *field; struct nix_rx_flowkey_alg tmp; u32 key_type, valid_key; + u32 l3_l4_src_dst; + int l4_key_offset = 0; if (!alg) return -EINVAL; @@ -2020,6 +4011,15 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) * group_member - Enabled when protocol is part of a group. */ + /* Last 4 bits (31:28) are reserved to specify SRC, DST + * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST, + * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST + * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST + */ + l3_l4_src_dst = flow_cfg; + /* Reset these 4 bits, so that these won't be part of key */ + flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK; + keyoff_marker = 0; max_key_off = 0; group_member = 0; nr_field = 0; key_off = 0; field_marker = 1; field = &tmp; max_bit_pos = fls(flow_cfg); @@ -2032,51 +4032,135 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) if (field_marker) memset(&tmp, 0, sizeof(tmp)); + field_marker = true; + keyoff_marker = true; switch (key_type) { case NIX_FLOW_KEY_TYPE_PORT: field->sel_chan = true; /* This should be set to 1, when SEL_CHAN is set */ field->bytesm1 = 1; - field_marker = true; - keyoff_marker = true; + break; + case NIX_FLOW_KEY_TYPE_IPV4_PROTO: + field->lid = NPC_LID_LC; + field->hdr_offset = 9; /* offset */ + field->bytesm1 = 0; /* 1 byte */ + field->ltype_match = NPC_LT_LC_IP; + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_IPV4: + case NIX_FLOW_KEY_TYPE_INNR_IPV4: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP; + } field->hdr_offset = 12; /* SIP offset */ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ - field->ltype_mask = 0xF; /* Match only IPv4 */ - field_marker = true; + + /* Only SIP */ + if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) + field->bytesm1 = 3; /* SIP, 4 bytes */ + + if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { + /* Both SIP + DIP */ + if (field->bytesm1 == 3) { + field->bytesm1 = 7; /* SIP + DIP, 8B */ + } else { + /* Only DIP */ + field->hdr_offset = 16; /* DIP off */ + field->bytesm1 = 3; /* DIP, 4 bytes */ + } + } + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; keyoff_marker = false; break; 
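Editor's note: a small model of the SRC/DST narrowing applied to the IPv4 case above. The default extract is SIP + DIP (8 bytes at header offset 12); selecting L3_SRC only shrinks it to 4 bytes, selecting L3_DST only moves the window to offset 16, and selecting both restores the full 8-byte extract. The bit positions follow the flow_cfg[31:28] comment above; the macro names, struct and main() are illustrative.

#include <stdio.h>

#define SEL_L3_SRC (1u << 31)	/* flow_cfg bit 31: hash on L3 source only */
#define SEL_L3_DST (1u << 30)	/* flow_cfg bit 30: hash on L3 destination only */

struct extract {
	int hdr_offset;
	int bytesm1;
};

/* Model of the IPv4 branch: start from SIP + DIP and narrow per the selectors */
static struct extract ipv4_extract(unsigned int sel)
{
	struct extract f = { .hdr_offset = 12, .bytesm1 = 7 };	/* SIP + DIP, 8 bytes */

	if (sel & SEL_L3_SRC)
		f.bytesm1 = 3;					/* SIP only, 4 bytes */
	if (sel & SEL_L3_DST) {
		if (f.bytesm1 == 3) {
			f.bytesm1 = 7;				/* SRC also set -> SIP + DIP */
		} else {
			f.hdr_offset = 16;			/* DIP only, 4 bytes */
			f.bytesm1 = 3;
		}
	}
	return f;
}

int main(void)
{
	struct extract f = ipv4_extract(SEL_L3_DST);

	printf("offset %d, %d bytes\n", f.hdr_offset, f.bytesm1 + 1);	/* offset 16, 4 bytes */
	return 0;
}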
case NIX_FLOW_KEY_TYPE_IPV6: + case NIX_FLOW_KEY_TYPE_INNR_IPV6: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP6; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) { + field->lid = NPC_LID_LG; + field->ltype_match = NPC_LT_LG_TU_IP6; + } field->hdr_offset = 8; /* SIP offset */ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ - field->ltype_mask = 0xF; /* Match only IPv6 */ - field_marker = true; - keyoff_marker = true; + + /* Only SIP */ + if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY) + field->bytesm1 = 15; /* SIP, 16 bytes */ + + if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) { + /* Both SIP + DIP */ + if (field->bytesm1 == 15) { + /* SIP + DIP, 32 bytes */ + field->bytesm1 = 31; + } else { + /* Only DIP */ + field->hdr_offset = 24; /* DIP off */ + field->bytesm1 = 15; /* DIP,16 bytes */ + } + } + field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: case NIX_FLOW_KEY_TYPE_SCTP: + case NIX_FLOW_KEY_TYPE_INNR_TCP: + case NIX_FLOW_KEY_TYPE_INNR_UDP: + case NIX_FLOW_KEY_TYPE_INNR_SCTP: field->lid = NPC_LID_LD; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) + field->lid = NPC_LID_LH; field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ - if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) { + + if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY) + field->bytesm1 = 1; /* SRC, 2 bytes */ + + if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) { + /* Both SRC + DST */ + if (field->bytesm1 == 1) { + /* SRC + DST, 4 bytes */ + field->bytesm1 = 3; + } else { + /* Only DIP */ + field->hdr_offset = 2; /* DST off */ + field->bytesm1 = 1; /* DST, 2 bytes */ + } + } + + /* Enum values for NPC_LID_LD and NPC_LID_LG are same, + * so no need to change the ltype_match, just change + * the lid for inner protocols + */ + BUILD_BUG_ON((int)NPC_LT_LD_TCP != + (int)NPC_LT_LH_TU_TCP); + BUILD_BUG_ON((int)NPC_LT_LD_UDP != + (int)NPC_LT_LH_TU_UDP); + BUILD_BUG_ON((int)NPC_LT_LD_SCTP != + (int)NPC_LT_LH_TU_SCTP); + + if ((key_type == NIX_FLOW_KEY_TYPE_TCP || + key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) && + valid_key) { field->ltype_match |= NPC_LT_LD_TCP; group_member = true; - } else if (key_type == NIX_FLOW_KEY_TYPE_UDP && + } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP || + key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) && valid_key) { field->ltype_match |= NPC_LT_LD_UDP; group_member = true; - } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP && + } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) && valid_key) { field->ltype_match |= NPC_LT_LD_SCTP; group_member = true; } field->ltype_mask = ~field->ltype_match; - if (key_type == NIX_FLOW_KEY_TYPE_SCTP) { + if (key_type == NIX_FLOW_KEY_TYPE_SCTP || + key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) { /* Handle the case where any of the group item * is enabled in the group but not the final one */ @@ -2084,18 +4168,119 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) valid_key = true; group_member = false; } - field_marker = true; - keyoff_marker = true; } else { field_marker = false; keyoff_marker = false; } + + /* TCP/UDP/SCTP and ESP/AH falls at same offset so + * remember the TCP key offset of 40 byte hash key. 
+ */ + if (key_type == NIX_FLOW_KEY_TYPE_TCP) + l4_key_offset = key_off; + break; + case NIX_FLOW_KEY_TYPE_NVGRE: + field->lid = NPC_LID_LD; + field->hdr_offset = 4; /* VSID offset */ + field->bytesm1 = 2; + field->ltype_match = NPC_LT_LD_NVGRE; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_VXLAN: + case NIX_FLOW_KEY_TYPE_GENEVE: + field->lid = NPC_LID_LE; + field->bytesm1 = 2; + field->hdr_offset = 4; + field->ltype_mask = 0xF; + field_marker = false; + keyoff_marker = false; + + if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) { + field->ltype_match |= NPC_LT_LE_VXLAN; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) { + field->ltype_match |= NPC_LT_LE_GENEVE; + group_member = true; + } + + if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) { + if (group_member) { + field->ltype_mask = ~field->ltype_match; + field_marker = true; + keyoff_marker = true; + valid_key = true; + group_member = false; + } + } + break; + case NIX_FLOW_KEY_TYPE_ETH_DMAC: + case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC: + field->lid = NPC_LID_LA; + field->ltype_match = NPC_LT_LA_ETHER; + if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) { + field->lid = NPC_LID_LF; + field->ltype_match = NPC_LT_LF_TU_ETHER; + } + field->hdr_offset = 0; + field->bytesm1 = 5; /* DMAC 6 Byte */ + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_IPV6_EXT: + field->lid = NPC_LID_LC; + field->hdr_offset = 40; /* IPV6 hdr */ + field->bytesm1 = 0; /* 1 Byte ext hdr*/ + field->ltype_match = NPC_LT_LC_IP6_EXT; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_GTPU: + field->lid = NPC_LID_LE; + field->hdr_offset = 4; + field->bytesm1 = 3; /* 4 bytes TID*/ + field->ltype_match = NPC_LT_LE_GTPU; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_CUSTOM0: + field->lid = NPC_LID_LC; + field->hdr_offset = 6; + field->bytesm1 = 1; /* 2 Bytes*/ + field->ltype_match = NPC_LT_LC_CUSTOM0; + field->ltype_mask = 0xF; + break; + case NIX_FLOW_KEY_TYPE_VLAN: + field->lid = NPC_LID_LB; + field->hdr_offset = 2; /* Skip TPID (2-bytes) */ + field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */ + field->ltype_match = NPC_LT_LB_CTAG; + field->ltype_mask = 0xF; + field->fn_mask = 1; /* Mask out the first nibble */ + break; + case NIX_FLOW_KEY_TYPE_AH: + case NIX_FLOW_KEY_TYPE_ESP: + field->hdr_offset = 0; + field->bytesm1 = 7; /* SPI + sequence number */ + field->ltype_mask = 0xF; + field->lid = NPC_LID_LE; + field->ltype_match = NPC_LT_LE_ESP; + if (key_type == NIX_FLOW_KEY_TYPE_AH) { + field->lid = NPC_LID_LD; + field->ltype_match = NPC_LT_LD_AH; + field->hdr_offset = 4; + keyoff_marker = false; + } break; } field->ena = 1; /* Found a valid flow key type */ if (valid_key) { + /* Use the key offset of TCP/UDP/SCTP fields + * for ESP/AH fields. 
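Editor's note: a simplified sketch of the key-offset bookkeeping referenced here. When the TCP/UDP/SCTP port field is placed into the 40-byte RSS key its offset is remembered, and an ESP/AH field is later placed at that same remembered offset instead of consuming new key space. The fixed 4-byte advance and field list below are simplifications for illustration; the driver tracks the real per-group widths.

#include <stdio.h>

enum field { FLD_IPV4, FLD_TCP_PORTS, FLD_ESP };

int main(void)
{
	enum field selected[] = { FLD_IPV4, FLD_TCP_PORTS, FLD_ESP };
	int key_off = 0, l4_key_offset = 0, off;

	for (int i = 0; i < 3; i++) {
		off = key_off;
		if (selected[i] == FLD_TCP_PORTS)
			l4_key_offset = off;	/* remember where the L4 ports landed */
		if (selected[i] == FLD_ESP)
			off = l4_key_offset;	/* ESP/AH reuse the L4 ports' slot */
		printf("field %d at key offset %d\n", (int)selected[i], off);
		if (selected[i] != FLD_ESP)
			key_off += 4;		/* simplified fixed-width advance */
	}
	return 0;
}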
+ */ + if (key_type == NIX_FLOW_KEY_TYPE_ESP || + key_type == NIX_FLOW_KEY_TYPE_AH) + key_off = l4_key_offset; field->key_offset = key_off; memcpy(&alg[nr_field], field, sizeof(*field)); max_key_off = max(max_key_off, field->bytesm1 + 1); @@ -2126,7 +4311,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) hw = get_nix_hw(rvu->hw, blkaddr); if (!hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; /* No room to add new flow hash algoritham */ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) @@ -2155,22 +4340,18 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, struct nix_rss_flowkey_cfg *req, struct nix_rss_flowkey_cfg_rsp *rsp) { - struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int alg_idx, nixlf, blkaddr; struct nix_hw *nix_hw; + int err; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); /* Failed to get algo index from the exiting list, reserve new */ @@ -2259,26 +4440,49 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) { - struct rvu_hwinfo *hw = rvu->hw; + bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; - int blkaddr, nixlf; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return err; pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + /* untrusted VF can't overwrite admin(PF) changes */ + if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && + (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) { + dev_warn(rvu->dev, + "MAC address set by admin(PF) cannot be overwritten by untrusted VF"); + return -EPERM; + } ether_addr_copy(pfvf->mac_addr, req->mac_addr); rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) + ether_addr_copy(pfvf->default_mac, req->mac_addr); + + return 0; +} + +int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu, + struct msg_req *req, + struct nix_get_mac_addr_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + + if (!is_nixlf_attached(rvu, pcifunc)) + return NIX_AF_ERR_AF_LF_INVALID; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); return 0; } @@ -2286,35 +4490,73 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp) { - bool allmulti = false, disable_promisc = false; - struct rvu_hwinfo *hw = rvu->hw; + bool allmulti, promisc, nix_rx_multicast; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int blkaddr, nixlf; + int nixlf, err; pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + promisc = 
req->mode & NIX_RX_MODE_PROMISC ? true : false; + allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false; + pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false; - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list; - if (req->mode & NIX_RX_MODE_PROMISC) - allmulti = false; - else if (req->mode & NIX_RX_MODE_ALLMULTI) - allmulti = true; - else - disable_promisc = true; + if (is_vf(pcifunc) && !nix_rx_multicast && + (promisc || allmulti)) { + dev_warn_ratelimited(rvu->dev, + "VF promisc/multicast not supported\n"); + return 0; + } - if (disable_promisc) - rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); - else - rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, - pfvf->rx_chan_base, allmulti); + /* untrusted VF can't configure promisc/allmulti */ + if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && + (promisc || allmulti)) + return 0; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); + if (err) + return err; - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + if (nix_rx_multicast) { + /* add/del this PF_FUNC to/from mcast pkt replication list */ + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY, + allmulti); + if (err) { + dev_err(rvu->dev, + "Failed to update pcifunc 0x%x to multicast list\n", + pcifunc); + return err; + } + + /* add/del this PF_FUNC to/from promisc pkt replication list */ + err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY, + promisc); + if (err) { + dev_err(rvu->dev, + "Failed to update pcifunc 0x%x to promisc list\n", + pcifunc); + return err; + } + } + + /* install/uninstall allmulti entry */ + if (allmulti) { + rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base); + } else { + if (!nix_rx_multicast) + rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false); + } + + /* install/uninstall promisc entry */ + if (promisc) + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, + pfvf->rx_chan_cnt); + else + if (!nix_rx_multicast) + rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false); return 0; } @@ -2322,7 +4564,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, static void nix_find_link_frs(struct rvu *rvu, struct nix_frs_cfg *req, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct rvu_pfvf *pfvf; int maxlen, minlen; int numvfs, hwvf; @@ -2369,12 +4611,13 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); - int blkaddr, schq, link = -1; - struct nix_txsch *txsch; - u64 cfg, lmac_fifo_len; + int pf = rvu_get_pf(rvu->pdev, pcifunc); + int blkaddr, link = -1; struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; + u16 max_mtu; + u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -2382,33 +4625,19 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; + + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) + rvu_get_lbk_link_max_frs(rvu, &max_mtu); + else + rvu_get_lmac_link_max_frs(rvu, &max_mtu); - if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) + if (!req->sdp_link && req->maxlen > max_mtu) return NIX_AF_ERR_FRS_INVALID; if (req->update_minlen && 
req->minlen < NIC_HW_MIN_FRS) return NIX_AF_ERR_FRS_INVALID; - /* Check if requester wants to update SMQ's */ - if (!req->update_smq) - goto rx_frscfg; - - /* Update min/maxlen in each of the SMQ attached to this PF/VF */ - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; - mutex_lock(&rvu->rsrc_lock); - for (schq = 0; schq < txsch->schq.max; schq++) { - if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) - continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); - if (req->update_minlen) - cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - } - mutex_unlock(&rvu->rsrc_lock); - -rx_frscfg: /* Check if config is for SDP link */ if (req->sdp_link) { if (!hw->sdp_links) @@ -2424,114 +4653,36 @@ rx_frscfg: link = (cgx * hw->lmac_per_cgx) + lmac; } else if (pf == 0) { /* For VFs of PF0 ingress is LBK port, so config LBK link */ - link = hw->cgx_links; + pfvf = rvu_get_pfvf(rvu, pcifunc); + link = hw->cgx_links + pfvf->lbkid; + } else if (is_rep_dev(rvu, pcifunc)) { + link = hw->cgx_links + 0; } if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; +linkcfg: nix_find_link_frs(rvu, req, pcifunc); -linkcfg: cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); if (req->update_minlen) cfg = (cfg & ~0xFFFFULL) | req->minlen; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); - if (req->sdp_link || pf == 0) - return 0; - - /* Update transmit credits for CGX links */ - lmac_fifo_len = - CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); - cfg &= ~(0xFFFFFULL << 12); - cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg); - - return 0; -} - -int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp) -{ - struct npc_mcam_alloc_entry_req alloc_req = { }; - struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; - struct npc_mcam_free_entry_req free_req = { }; - u16 pcifunc = req->hdr.pcifunc; - int blkaddr, nixlf, err; - struct rvu_pfvf *pfvf; - - /* LBK VFs do not have separate MCAM UCAST entry hence - * skip allocating rxvlan for them - */ - if (is_afvf(pcifunc)) - return 0; - - pfvf = rvu_get_pfvf(rvu, pcifunc); - if (pfvf->rxvlan) - return 0; - - /* alloc new mcam entry */ - alloc_req.hdr.pcifunc = pcifunc; - alloc_req.count = 1; - - err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, - &alloc_rsp); - if (err) - return err; - - /* update entry to enable rxvlan offload */ - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) { - err = NIX_AF_ERR_AF_LF_INVALID; - goto free_entry; - } - - nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) { - err = NIX_AF_ERR_AF_LF_INVALID; - goto free_entry; - } - - pfvf->rxvlan_index = alloc_rsp.entry_list[0]; - /* all it means is that rxvlan_index is valid */ - pfvf->rxvlan = true; - - err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); - if (err) - goto free_entry; - return 0; -free_entry: - free_req.hdr.pcifunc = pcifunc; - free_req.entry = alloc_rsp.entry_list[0]; - rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); - pfvf->rxvlan = false; - return err; } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, struct msg_rsp *rsp) { - struct 
rvu_hwinfo *hw = rvu->hw; - u16 pcifunc = req->hdr.pcifunc; - struct rvu_block *block; - struct rvu_pfvf *pfvf; - int nixlf, blkaddr; + int nixlf, blkaddr, err; u64 cfg; - pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - - block = &hw->block[blkaddr]; - nixlf = rvu_get_lf(rvu, block, pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr); + if (err) + return err; cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); /* Set the interface configuration */ @@ -2545,6 +4696,11 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, else cfg &= ~BIT_ULL(40); + if (req->len_verify & NIX_RX_DROP_RE) + cfg |= BIT_ULL(32); + else + cfg &= ~BIT_ULL(32); + if (req->csum_verify & BIT(0)) cfg |= BIT_ULL(37); else @@ -2555,11 +4711,27 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, return 0; } -static void nix_link_config(struct rvu *rvu, int blkaddr) +static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) +{ + return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ +} + +static void nix_link_config(struct rvu *rvu, int blkaddr, + struct nix_hw *nix_hw) { struct rvu_hwinfo *hw = rvu->hw; int cgx, lmac_cnt, slink, link; - u64 tx_credits; + u16 lbk_max_frs, lmac_max_frs; + unsigned long lmac_bmap; + u64 tx_credits, cfg; + u64 lmac_fifo_len; + int iter; + + rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); + rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); + + /* Set SDP link credit */ + rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT); /* Set default min/max packet lengths allowed on NIX Rx links. * @@ -2567,15 +4739,25 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) * as undersize and report them to SW as error pkts, hence * setting it to 40 bytes. */ - for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { + for (link = 0; link < hw->cgx_links; link++) { rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), - NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); } + for (link = hw->cgx_links; link < hw->lbk_links; link++) { + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); + } if (hw->sdp_links) { link = hw->cgx_links + hw->lbk_links; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), - SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS); + } + + /* Get MCS external bypass status for CN10K-B */ + if (mcs_get_blkcnt() == 1) { + /* Adjust for 2 credits when external bypass is disabled */ + nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2; } /* Set credits for Tx links assuming max packet length allowed. 
@@ -2583,30 +4765,42 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; - /* Enable credits and set credit pkt count to max allowed */ - tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + /* Skip when cgx is not available or lmac cnt is zero */ + if (lmac_cnt <= 0) + continue; slink = cgx * hw->lmac_per_cgx; - for (link = slink; link < (slink + lmac_cnt); link++) { - rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link), - tx_credits); + + /* Get LMAC id's from bitmap */ + lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); + for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { + lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); + if (!lmac_fifo_len) { + dev_err(rvu->dev, + "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", + __func__, cgx, iter); + continue; + } + tx_credits = (lmac_fifo_len - lmac_max_frs) / 16; + /* Enable credits and set credit pkt count to max allowed */ + cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt); + + link = iter + slink; + nix_hw->tx_credits[link] = tx_credits; rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_EXPR_CREDIT(link), - tx_credits); + NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); } } /* Set Tx credits for LBK link */ slink = hw->cgx_links; for (link = slink; link < (slink + hw->lbk_links); link++) { - tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ + tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); + nix_hw->tx_credits[link] = tx_credits; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); - rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits); } } @@ -2674,6 +4868,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) /* Do not bypass NDC cache */ cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG); cfg &= ~0x3FFEULL; +#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING + /* Disable caching of SQB aka SQEs */ + cfg |= 0x04ULL; +#endif rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg); /* Result structure can be followed by RQ/SQ/CQ context at @@ -2692,38 +4890,70 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) return 0; } -int rvu_nix_init(struct rvu *rvu) +static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; + u64 hw_const; + + hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + + /* On OcteonTx2 DWRR quantum is directly configured into each of + * the transmit scheduler queues. And PF/VF drivers were free to + * config any value upto 2^24. + * On CN10K, HW is modified, the quantum configuration at scheduler + * queues is in terms of weight. And SW needs to setup a base DWRR MTU + * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do + * 'DWRR MTU * weight' to get the quantum. + * + * Check if HW uses a common MTU for all DWRR quantum configs. + * On OcteonTx2 this register field is '0'. 
+ */ + if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61))) + hw->cap.nix_common_dwrr_mtu = true; + + if (hw_const & BIT_ULL(61)) + hw->cap.nix_multiple_dwrr_mtu = true; +} + +static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) +{ + const struct npc_lt_def_cfg *ltdefs; + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr = nix_hw->blkaddr; struct rvu_block *block; - int blkaddr, err; + int err; u64 cfg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); - if (blkaddr < 0) - return 0; block = &hw->block[blkaddr]; - /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt - * internal state when conditional clocks are turned off. - * Hence enable them. - */ - if (is_rvu_9xxx_A0(rvu)) + if (is_rvu_96xx_B0(rvu)) { + /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt + * internal state when conditional clocks are turned off. + * Hence enable them. + */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, - rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL); + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); + } + + /* Set chan/link to backpressure TL3 instead of TL2 */ + rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); + + /* Disable SQ manager's sticky mode operation (set TM6 = 0) + * This sticky mode is known to cause SQ stalls when multiple + * SQs are mapped to same SMQ and transmitting pkts at a time. + */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); + cfg &= ~BIT_ULL(15); + rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); + ltdefs = rvu->kpu.lt_def; /* Calibrate X2P bus to check if CGX/LBK links are fine */ err = nix_calibrate_x2p(rvu, blkaddr); if (err) return err; - /* Set num of links of each type */ - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); - hw->cgx = (cfg >> 12) & 0xF; - hw->lmac_per_cgx = (cfg >> 8) & 0xF; - hw->cgx_links = hw->cgx * hw->lmac_per_cgx; - hw->lbk_links = 1; - hw->sdp_links = 1; + /* Setup capabilities of the NIX block */ + rvu_nix_setup_capabilities(rvu, blkaddr); /* Initialize admin queue */ err = nix_aq_init(rvu, block); @@ -2733,82 +4963,175 @@ int rvu_nix_init(struct rvu *rvu) /* Restore CINT timer delay to HW reset values */ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); - if (blkaddr == BLKADDR_NIX0) { - hw->nix0 = devm_kzalloc(rvu->dev, - sizeof(struct nix_hw), GFP_KERNEL); - if (!hw->nix0) - return -ENOMEM; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG); + + /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ + cfg |= 1ULL; + if (!is_rvu_otx2(rvu)) + cfg |= NIX_PTP_1STEP_EN; + + rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg); + + if (!is_rvu_otx2(rvu)) + rvu_nix_block_cn10k_init(rvu, nix_hw); + + if (is_block_implemented(hw, blkaddr)) { + err = nix_setup_txschq(rvu, nix_hw, blkaddr); + if (err) + return err; + + err = nix_setup_ipolicers(rvu, nix_hw, blkaddr); + if (err) + return err; + + err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); + if (err) + return err; - err = nix_setup_txschq(rvu, hw->nix0, blkaddr); + err = nix_setup_mcast(rvu, nix_hw, blkaddr); if (err) return err; - err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr); + err = nix_setup_txvlan(rvu, nix_hw); if (err) return err; - err = nix_setup_mcast(rvu, hw->nix0, blkaddr); + err = nix_setup_bpids(rvu, nix_hw, blkaddr); if (err) return err; /* Configure segmentation offload formats */ - nix_setup_lso(rvu, hw->nix0, blkaddr); + nix_setup_lso(rvu, nix_hw, blkaddr); /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 
* This helps HW protocol checker to identify headers * and validate length and checksums. */ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, - (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F); + (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) | + ltdefs->rx_ol2.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, - (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); + (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) | + ltdefs->rx_oip4.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F); + (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) | + ltdefs->rx_iip4.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, - (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F); + (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) | + ltdefs->rx_oip6.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, - (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F); + (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) | + ltdefs->rx_iip6.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, - (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F); + (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) | + ltdefs->rx_otcp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F); + (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) | + ltdefs->rx_itcp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, - (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F); + (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) | + ltdefs->rx_oudp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F); + (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) | + ltdefs->rx_iudp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, - (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F); + (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) | + ltdefs->rx_osctp.ltype_mask); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, - (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) | - 0x0F); + (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) | + ltdefs->rx_isctp.ltype_mask); + + if (!is_rvu_otx2(rvu)) { + /* Enable APAD calculation for other protocols + * matching APAD0 and APAD1 lt def registers. + */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0, + (ltdefs->rx_apad0.valid << 11) | + (ltdefs->rx_apad0.lid << 8) | + (ltdefs->rx_apad0.ltype_match << 4) | + ltdefs->rx_apad0.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1, + (ltdefs->rx_apad1.valid << 11) | + (ltdefs->rx_apad1.lid << 8) | + (ltdefs->rx_apad1.ltype_match << 4) | + ltdefs->rx_apad1.ltype_mask); + + /* Receive ethertype definition register defines layer + * information in NPC_RESULT_S to identify the Ethertype + * location in L2 header. Used for Ethertype overwriting + * in inline IPsec flow. 
+ */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0), + (ltdefs->rx_et[0].offset << 12) | + (ltdefs->rx_et[0].valid << 11) | + (ltdefs->rx_et[0].lid << 8) | + (ltdefs->rx_et[0].ltype_match << 4) | + ltdefs->rx_et[0].ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1), + (ltdefs->rx_et[1].offset << 12) | + (ltdefs->rx_et[1].valid << 11) | + (ltdefs->rx_et[1].lid << 8) | + (ltdefs->rx_et[1].ltype_match << 4) | + ltdefs->rx_et[1].ltype_mask); + } err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); if (err) return err; + nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, + sizeof(u64), GFP_KERNEL); + if (!nix_hw->tx_credits) + return -ENOMEM; + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ - nix_link_config(rvu, blkaddr); + nix_link_config(rvu, blkaddr, nix_hw); + + /* Enable Channel backpressure */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); } return 0; } -void rvu_nix_freemem(struct rvu *rvu) +int rvu_nix_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; - struct rvu_block *block; + struct nix_hw *nix_hw; + int blkaddr = 0, err; + int i = 0; + + hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), + GFP_KERNEL); + if (!hw->nix) + return -ENOMEM; + + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + nix_hw = &hw->nix[i]; + nix_hw->rvu = rvu; + nix_hw->blkaddr = blkaddr; + err = rvu_nix_block_init(rvu, nix_hw); + if (err) + return err; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + i++; + } + + return 0; +} + +static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, + struct rvu_block *block) +{ struct nix_txsch *txsch; struct nix_mcast *mcast; + struct nix_txvlan *vlan; struct nix_hw *nix_hw; - int blkaddr, lvl; + int lvl; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); - if (blkaddr < 0) - return; - - block = &hw->block[blkaddr]; rvu_aq_free(rvu, block->aq); - if (blkaddr == BLKADDR_NIX0) { + if (is_block_implemented(rvu->hw, blkaddr)) { nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return; @@ -2818,6 +5141,14 @@ void rvu_nix_freemem(struct rvu *rvu) kfree(txsch->schq.bmap); } + kfree(nix_hw->tx_credits); + + nix_ipolicer_freemem(rvu, nix_hw); + + vlan = &nix_hw->txvlan; + kfree(vlan->rsrc.bmap); + mutex_destroy(&vlan->rsrc_lock); + mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); qmem_free(rvu->dev, mcast->mcast_buf); @@ -2825,64 +5156,179 @@ void rvu_nix_freemem(struct rvu *rvu) } } -static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) +void rvu_nix_freemem(struct rvu *rvu) { - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr = 0; + + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + block = &hw->block[blkaddr]; + rvu_nix_block_freemem(rvu, blkaddr, block); + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } +} + +static void nix_mcast_update_action(struct rvu *rvu, + struct nix_mcast_grp_elem *elem) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct nix_rx_action rx_action = { 0 }; + struct nix_tx_action tx_action = { 0 }; + int npc_blkaddr; + + npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (elem->dir == NIX_MCAST_INGRESS) { + *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam, + npc_blkaddr, + elem->mcam_index); + rx_action.index = elem->mce_start_index; + npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, + *(u64 *)&rx_action); + } else { + *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam, + npc_blkaddr, + elem->mcam_index); + 
tx_action.index = elem->mce_start_index; + npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index, + *(u64 *)&tx_action); + } +} + +static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; - *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (*nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + mcast_grp = &nix_hw->mcast_grp; - return 0; + mutex_lock(&mcast_grp->mcast_grp_lock); + list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) { + struct nix_mce_list *mce_list; + struct mce *mce; + + /* Iterate the group elements and disable the element which + * received the disable request. + */ + mce_list = &elem->mcast_mce_list; + hlist_for_each_entry(mce, &mce_list->head, node) { + if (mce->pcifunc == pcifunc) { + mce->is_active = is_active; + break; + } + } + + /* Dump the updated list to HW */ + if (elem->dir == NIX_MCAST_INGRESS) + nix_update_ingress_mce_list_hw(rvu, nix_hw, elem); + else + nix_update_egress_mce_list_hw(rvu, nix_hw, elem); + + /* Update the multicast index in NPC rule */ + nix_mcast_update_action(rvu, elem); + } + mutex_unlock(&mcast_grp->mcast_grp_lock); } int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; - int nixlf, err; + struct rvu_pfvf *pfvf; + int nixlf, err, pf; - err = nix_get_nixlf(rvu, pcifunc, &nixlf); + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) return err; + /* Enable the interface if it is in any multicast list */ + nix_mcast_update_mce_entry(rvu, pcifunc, 1); + rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); - return 0; + + npc_mcam_enable_flows(rvu, pcifunc); + + pfvf = rvu_get_pfvf(rvu, pcifunc); + set_bit(NIXLF_INITIALIZED, &pfvf->flags); + + rvu_switch_update_rules(rvu, pcifunc, true); + + pf = rvu_get_pf(rvu->pdev, pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, true); + + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; - int nixlf, err; + struct rvu_pfvf *pfvf; + int nixlf, err, pf; - err = nix_get_nixlf(rvu, pcifunc, &nixlf); + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) return err; - rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + /* Disable the interface if it is in any multicast list */ + nix_mcast_update_mce_entry(rvu, pcifunc, 0); + + + pfvf = rvu_get_pfvf(rvu, pcifunc); + clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + + err = rvu_cgx_start_stop_io(rvu, pcifunc, false); + if (err) + return err; + + rvu_switch_update_rules(rvu, pcifunc, false); + rvu_cgx_tx_enable(rvu, pcifunc, true); + + pf = rvu_get_pf(rvu->pdev, pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; } +#define RX_SA_BASE GENMASK_ULL(52, 7) + void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; + int pf = rvu_get_pf(rvu->pdev, pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + u64 sa_base; + void 
*cgxd; int err; ctx_req.hdr.pcifunc = pcifunc; /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); nix_interface_deinit(rvu, pcifunc, nixlf); nix_rx_sync(rvu, blkaddr); nix_txschq_free(rvu, pcifunc); + clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); + + rvu_cgx_start_stop_io(rvu, pcifunc, false); + if (pfvf->sq_ctx) { ctx_req.ctype = NIX_AQ_CTYPE_SQ; err = nix_lf_hwctx_disable(rvu, &ctx_req); @@ -2904,7 +5350,86 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) dev_err(rvu->dev, "CQ ctx disable failed\n"); } + /* reset HW config done for Switch headers */ + rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, + (PKIND_TX | PKIND_RX), 0, 0, 0, 0); + + /* Disabling CGX and NPC config done for PTP */ + if (pfvf->hw_rx_tstamp_en) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); + /* Undo NPC config done for PTP */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) + dev_err(rvu->dev, "NPC config for PTP failed\n"); + pfvf->hw_rx_tstamp_en = false; + } + + /* reset priority flow control config */ + rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0); + + /* reset 802.3x flow control config */ + rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0); + nix_ctx_free(rvu, pfvf); + + nix_free_all_bandprof(rvu, pcifunc); + + sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf)); + if (FIELD_GET(RX_SA_BASE, sa_base)) { + err = rvu_cpt_ctx_flush(rvu, pcifunc); + if (err) + dev_err(rvu->dev, + "CPT ctx flush failed with error: %d\n", err); + } +} + +#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32) + +static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr, pf; + int nixlf; + u64 cfg; + + pf = rvu_get_pf(rvu->pdev, pcifunc); + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) + return 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf)); + + if (enable) + cfg |= NIX_AF_LFX_TX_CFG_PTP_EN; + else + cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); + + return 0; +} + +int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true); +} + +int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false); } int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, @@ -2924,7 +5449,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; /* Find existing matching LSO format, if any */ for (idx = 0; idx < nix_hw->lso.in_use; idx++) { @@ -2957,3 +5482,1171 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, return 0; } + +#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) +#define IPSEC_GEN_CFG_OPCODE 
GENMASK_ULL(47, 32) +#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) +#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) + +#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) +#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) +#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) + +#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32) +#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22) +#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0) + +static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, + int blkaddr) +{ + u8 cpt_idx, cpt_blkaddr; + u64 val; + + cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; + if (req->enable) { + val = 0; + /* Enable context prefetching */ + if (!is_rvu_otx2(rvu)) + val |= BIT_ULL(51); + + /* Set OPCODE and EGRP */ + val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); + val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); + + /* Set CPT queue for inline IPSec */ + val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); + val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, + req->inst_qsel.cpt_pf_func); + + if (!is_rvu_otx2(rvu)) { + cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 : + BLKADDR_CPT1; + val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); + } + + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + val); + + /* Set CPT credit */ + val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); + if ((val & 0x3FFFFF) != 0x3FFFFF) + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF - val); + + val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit); + val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid); + val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + 0x0); + val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx)); + if ((val & 0x3FFFFF) != 0x3FFFFF) + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF - val); + } +} + +int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, + struct nix_inline_ipsec_cfg *req, + struct msg_rsp *rsp) +{ + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); + if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); + + return 0; +} + +int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu, + struct msg_req *req, + struct nix_inline_ipsec_cfg *rsp) + +{ + u64 val; + + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG); + rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val); + rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val); + rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val); + rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val); + + val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0)); + rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val); + rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val); + rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val); + + return 0; +} + +int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, + struct nix_inline_ipsec_lf_cfg *req, + struct msg_rsp *rsp) +{ + int lf, blkaddr, err; + u64 val; + + if 
(!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); + if (err) + return err; + + if (req->enable) { + /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ + val = (u64)req->ipsec_cfg0.tt << 44 | + (u64)req->ipsec_cfg0.tag_const << 20 | + (u64)req->ipsec_cfg0.sa_pow2_size << 16 | + req->ipsec_cfg0.lenm1_max; + + if (blkaddr == BLKADDR_NIX1) + val |= BIT_ULL(46); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); + + /* Set SA_IDX_W and SA_IDX_MAX */ + val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | + req->ipsec_cfg1.sa_idx_max; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); + + /* Set SA base address */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + req->sa_base_addr); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + 0x0); + } + + return 0; +} + +void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) +{ + bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); + + /* overwrite vf mac address with default_mac */ + if (from_vf) + ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); +} + +/* NIX ingress policers or bandwidth profiles APIs */ +static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr) +{ + struct npc_lt_def_cfg defs, *ltdefs; + + ltdefs = &defs; + memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg)); + + /* Extract PCP and DEI fields from outer VLAN from byte offset + * 2 from the start of LB_PTR (ie TAG). + * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN + * fields are considered when 'Tunnel enable' is set in profile. + */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI, + (2UL << 12) | (ltdefs->ovlan.lid << 8) | + (ltdefs->ovlan.ltype_match << 4) | + ltdefs->ovlan.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI, + (2UL << 12) | (ltdefs->ivlan.lid << 8) | + (ltdefs->ivlan.ltype_match << 4) | + ltdefs->ivlan.ltype_mask); + + /* DSCP field in outer and tunneled IPv4 packets */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP, + (1UL << 12) | (ltdefs->rx_oip4.lid << 8) | + (ltdefs->rx_oip4.ltype_match << 4) | + ltdefs->rx_oip4.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP, + (1UL << 12) | (ltdefs->rx_iip4.lid << 8) | + (ltdefs->rx_iip4.ltype_match << 4) | + ltdefs->rx_iip4.ltype_mask); + + /* DSCP field (traffic class) in outer and tunneled IPv6 packets */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP, + (1UL << 11) | (ltdefs->rx_oip6.lid << 8) | + (ltdefs->rx_oip6.ltype_match << 4) | + ltdefs->rx_oip6.ltype_mask); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP, + (1UL << 11) | (ltdefs->rx_iip6.lid << 8) | + (ltdefs->rx_iip6.ltype_match << 4) | + ltdefs->rx_iip6.ltype_mask); +} + +static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw, + int layer, int prof_idx) +{ + struct nix_cn10k_aq_enq_req aq_req; + int rc; + + memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + + aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14); + aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; + aq_req.op = NIX_AQ_INSTOP_INIT; + + /* Context is all zeros, submit to AQ */ + rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)&aq_req, NULL); + if (rc) + dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n", + layer, prof_idx); + return rc; +} + +static int nix_setup_ipolicers(struct rvu *rvu, + 
struct nix_hw *nix_hw, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct nix_ipolicer *ipolicer; + int err, layer, prof_idx; + u64 cfg; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + if (!(cfg & BIT_ULL(61))) { + hw->cap.ipolicer = false; + return 0; + } + + hw->cap.ipolicer = true; + nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS, + sizeof(*ipolicer), GFP_KERNEL); + if (!nix_hw->ipolicer) + return -ENOMEM; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST); + + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + ipolicer = &nix_hw->ipolicer[layer]; + switch (layer) { + case BAND_PROF_LEAF_LAYER: + ipolicer->band_prof.max = cfg & 0XFFFF; + break; + case BAND_PROF_MID_LAYER: + ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF; + break; + case BAND_PROF_TOP_LAYER: + ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF; + break; + } + + if (!ipolicer->band_prof.max) + continue; + + err = rvu_alloc_bitmap(&ipolicer->band_prof); + if (err) + return err; + + ipolicer->pfvf_map = devm_kcalloc(rvu->dev, + ipolicer->band_prof.max, + sizeof(u16), GFP_KERNEL); + if (!ipolicer->pfvf_map) + return -ENOMEM; + + ipolicer->match_id = devm_kcalloc(rvu->dev, + ipolicer->band_prof.max, + sizeof(u16), GFP_KERNEL); + if (!ipolicer->match_id) + return -ENOMEM; + + for (prof_idx = 0; + prof_idx < ipolicer->band_prof.max; prof_idx++) { + /* Set AF as current owner for INIT ops to succeed */ + ipolicer->pfvf_map[prof_idx] = 0x00; + + /* There is no enable bit in the profile context, + * so no context disable. So let's INIT them here + * so that PF/VF later on have to just do WRITE to + * setup policer rates and config. + */ + err = nix_init_policer_context(rvu, nix_hw, + layer, prof_idx); + if (err) + return err; + } + + /* Allocate memory for maintaining ref_counts for MID level + * profiles, this will be needed for leaf layer profiles' + * aggregation. + */ + if (layer != BAND_PROF_MID_LAYER) + continue; + + ipolicer->ref_count = devm_kcalloc(rvu->dev, + ipolicer->band_prof.max, + sizeof(u16), GFP_KERNEL); + if (!ipolicer->ref_count) + return -ENOMEM; + } + + /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */ + rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19); + + nix_config_rx_pkt_policer_precolor(rvu, blkaddr); + + return 0; +} + +static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) +{ + struct nix_ipolicer *ipolicer; + int layer; + + if (!rvu->hw->cap.ipolicer) + return; + + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + ipolicer = &nix_hw->ipolicer[layer]; + + if (!ipolicer->band_prof.max) + continue; + + kfree(ipolicer->band_prof.bmap); + } +} + +#define NIX_BW_PROF_HI_MASK GENMASK(10, 7) + +static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, + struct nix_hw *nix_hw, u16 pcifunc) +{ + struct nix_ipolicer *ipolicer; + int layer, hi_layer, prof_idx; + + /* Bits [15:14] in profile index represent layer */ + layer = (req->qidx >> 14) & 0x03; + prof_idx = req->qidx & 0x3FFF; + + ipolicer = &nix_hw->ipolicer[layer]; + if (prof_idx >= ipolicer->band_prof.max) + return -EINVAL; + + /* Check if the profile is allocated to the requesting PCIFUNC or not + * with the exception of AF. AF is allowed to read and update contexts. + */ + if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc) + return -EINVAL; + + /* If this profile is linked to higher layer profile then check + * if that profile is also allocated to the requesting PCIFUNC + * or not. 
+ */ + if (!req->prof.hl_en) + return 0; + + /* Leaf layer profile can link only to mid layer and + * mid layer to top layer. + */ + if (layer == BAND_PROF_LEAF_LAYER) + hi_layer = BAND_PROF_MID_LAYER; + else if (layer == BAND_PROF_MID_LAYER) + hi_layer = BAND_PROF_TOP_LAYER; + else + return -EINVAL; + + ipolicer = &nix_hw->ipolicer[hi_layer]; + prof_idx = FIELD_PREP(NIX_BW_PROF_HI_MASK, req->prof.band_prof_id_h); + prof_idx |= req->prof.band_prof_id; + if (prof_idx >= ipolicer->band_prof.max || + ipolicer->pfvf_map[prof_idx] != pcifunc) + return -EINVAL; + + return 0; +} + +int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu, + struct nix_bandprof_alloc_req *req, + struct nix_bandprof_alloc_rsp *rsp) +{ + int blkaddr, layer, prof, idx, err; + u16 pcifunc = req->hdr.pcifunc; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mutex_lock(&rvu->rsrc_lock); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + if (!req->prof_count[layer]) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + for (idx = 0; idx < req->prof_count[layer]; idx++) { + /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */ + if (idx == MAX_BANDPROF_PER_PFFUNC) + break; + + prof = rvu_alloc_rsrc(&ipolicer->band_prof); + if (prof < 0) + break; + rsp->prof_count[layer]++; + rsp->prof_idx[layer][idx] = prof; + ipolicer->pfvf_map[prof] = pcifunc; + } + } + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, layer, prof_idx, err; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mutex_lock(&rvu->rsrc_lock); + /* Free all the profiles allocated to the PCIFUNC */ + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + ipolicer = &nix_hw->ipolicer[layer]; + + for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) { + if (ipolicer->pfvf_map[prof_idx] != pcifunc) + continue; + + /* Clear ratelimit aggregation, if any */ + if (layer == BAND_PROF_LEAF_LAYER && + ipolicer->match_id[prof_idx]) + nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); + + ipolicer->pfvf_map[prof_idx] = 0x00; + ipolicer->match_id[prof_idx] = 0; + rvu_free_rsrc(&ipolicer->band_prof, prof_idx); + } + } + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu, + struct nix_bandprof_free_req *req, + struct msg_rsp *rsp) +{ + int blkaddr, layer, prof_idx, idx, err; + u16 pcifunc = req->hdr.pcifunc; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + + if (req->free_all) + return nix_free_all_bandprof(rvu, pcifunc); + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mutex_lock(&rvu->rsrc_lock); + /* Free the requested profile indices */ + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + if (!req->prof_count[layer]) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + for (idx = 0; idx < req->prof_count[layer]; idx++) { + if (idx == MAX_BANDPROF_PER_PFFUNC) + break; + prof_idx = 
req->prof_idx[layer][idx]; + if (prof_idx >= ipolicer->band_prof.max || + ipolicer->pfvf_map[prof_idx] != pcifunc) + continue; + + /* Clear ratelimit aggregation, if any */ + if (layer == BAND_PROF_LEAF_LAYER && + ipolicer->match_id[prof_idx]) + nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx); + + ipolicer->pfvf_map[prof_idx] = 0x00; + ipolicer->match_id[prof_idx] = 0; + rvu_free_rsrc(&ipolicer->band_prof, prof_idx); + } + } + mutex_unlock(&rvu->rsrc_lock); + return 0; +} + +int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_cn10k_aq_enq_req *aq_req, + struct nix_cn10k_aq_enq_rsp *aq_rsp, + u16 pcifunc, u8 ctype, u32 qidx) +{ + memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + aq_req->hdr.pcifunc = pcifunc; + aq_req->ctype = ctype; + aq_req->op = NIX_AQ_INSTOP_READ; + aq_req->qidx = qidx; + + return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)aq_req, + (struct nix_aq_enq_rsp *)aq_rsp); +} + +static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu, + struct nix_hw *nix_hw, + struct nix_cn10k_aq_enq_req *aq_req, + struct nix_cn10k_aq_enq_rsp *aq_rsp, + u32 leaf_prof, u16 mid_prof) +{ + memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + aq_req->hdr.pcifunc = 0x00; + aq_req->ctype = NIX_AQ_CTYPE_BANDPROF; + aq_req->op = NIX_AQ_INSTOP_WRITE; + aq_req->qidx = leaf_prof; + + aq_req->prof.band_prof_id = mid_prof & 0x7F; + aq_req->prof_mask.band_prof_id = GENMASK(6, 0); + aq_req->prof.band_prof_id_h = FIELD_GET(NIX_BW_PROF_HI_MASK, mid_prof); + aq_req->prof_mask.band_prof_id_h = GENMASK(3, 0); + aq_req->prof.hl_en = 1; + aq_req->prof_mask.hl_en = 1; + + return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)aq_req, + (struct nix_aq_enq_rsp *)aq_rsp); +} + +#define NIX_RQ_PROF_HI_MASK GENMASK(13, 10) + +int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc, + u16 rq_idx, u16 match_id) +{ + int leaf_prof, mid_prof, leaf_match; + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + struct nix_ipolicer *ipolicer; + struct nix_hw *nix_hw; + int blkaddr, idx, rc; + + if (!rvu->hw->cap.ipolicer) + return 0; + + rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (rc) + return rc; + + /* Fetch the RQ's context to see if policing is enabled */ + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc, + NIX_AQ_CTYPE_RQ, rq_idx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n", + __func__, rq_idx, pcifunc); + return rc; + } + + if (!aq_rsp.rq.policer_ena) + return 0; + + /* Get the bandwidth profile ID mapped to this RQ */ + leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h); + leaf_prof |= aq_rsp.rq.band_prof_id; + + ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER]; + ipolicer->match_id[leaf_prof] = match_id; + + /* Check if any other leaf profile is marked with same match_id */ + for (idx = 0; idx < ipolicer->band_prof.max; idx++) { + if (idx == leaf_prof) + continue; + if (ipolicer->match_id[idx] != match_id) + continue; + + leaf_match = idx; + break; + } + + if (idx == ipolicer->band_prof.max) + return 0; + + /* Fetch the matching profile's context to check if it's already + * mapped to a mid level profile. 
+ */ + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, + NIX_AQ_CTYPE_BANDPROF, leaf_match); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch context of leaf profile %d\n", + __func__, leaf_match); + return rc; + } + + ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; + if (aq_rsp.prof.hl_en) { + /* Get Mid layer prof index and map leaf_prof index + * also such that flows that are being steered + * to different RQs and marked with same match_id + * are rate limited in a aggregate fashion + */ + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, + aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; + + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, + &aq_req, &aq_rsp, + leaf_prof, mid_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to map leaf(%d) and mid(%d) profiles\n", + __func__, leaf_prof, mid_prof); + goto exit; + } + + mutex_lock(&rvu->rsrc_lock); + ipolicer->ref_count[mid_prof]++; + mutex_unlock(&rvu->rsrc_lock); + goto exit; + } + + /* Allocate a mid layer profile and + * map both 'leaf_prof' and 'leaf_match' profiles to it. + */ + mutex_lock(&rvu->rsrc_lock); + mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof); + if (mid_prof < 0) { + dev_err(rvu->dev, + "%s: Unable to allocate mid layer profile\n", __func__); + mutex_unlock(&rvu->rsrc_lock); + goto exit; + } + mutex_unlock(&rvu->rsrc_lock); + ipolicer->pfvf_map[mid_prof] = 0x00; + ipolicer->ref_count[mid_prof] = 0; + + /* Initialize mid layer profile same as 'leaf_prof' */ + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, + NIX_AQ_CTYPE_BANDPROF, leaf_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch context of leaf profile %d\n", + __func__, leaf_prof); + goto exit; + } + + memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req)); + aq_req.hdr.pcifunc = 0x00; + aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14); + aq_req.ctype = NIX_AQ_CTYPE_BANDPROF; + aq_req.op = NIX_AQ_INSTOP_WRITE; + memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s)); + memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s)); + /* Clear higher layer enable bit in the mid profile, just in case */ + aq_req.prof.hl_en = 0; + aq_req.prof_mask.hl_en = 1; + + rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, + (struct nix_aq_enq_req *)&aq_req, NULL); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to INIT context of mid layer profile %d\n", + __func__, mid_prof); + goto exit; + } + + /* Map both leaf profiles to this mid layer profile */ + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, + &aq_req, &aq_rsp, + leaf_prof, mid_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to map leaf(%d) and mid(%d) profiles\n", + __func__, leaf_prof, mid_prof); + goto exit; + } + + mutex_lock(&rvu->rsrc_lock); + ipolicer->ref_count[mid_prof]++; + mutex_unlock(&rvu->rsrc_lock); + + rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw, + &aq_req, &aq_rsp, + leaf_match, mid_prof); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to map leaf(%d) and mid(%d) profiles\n", + __func__, leaf_match, mid_prof); + ipolicer->ref_count[mid_prof]--; + goto exit; + } + + mutex_lock(&rvu->rsrc_lock); + ipolicer->ref_count[mid_prof]++; + mutex_unlock(&rvu->rsrc_lock); + +exit: + return rc; +} + +/* Called with mutex rsrc_lock */ +static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, + u32 leaf_prof) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + struct nix_ipolicer *ipolicer; + u16 mid_prof; + int rc; + + mutex_unlock(&rvu->rsrc_lock); + + rc = 
nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00, + NIX_AQ_CTYPE_BANDPROF, leaf_prof); + + mutex_lock(&rvu->rsrc_lock); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch context of leaf profile %d\n", + __func__, leaf_prof); + return; + } + + if (!aq_rsp.prof.hl_en) + return; + + mid_prof = FIELD_PREP(NIX_BW_PROF_HI_MASK, aq_rsp.prof.band_prof_id_h); + mid_prof |= aq_rsp.prof.band_prof_id; + ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER]; + ipolicer->ref_count[mid_prof]--; + /* If ref_count is zero, free mid layer profile */ + if (!ipolicer->ref_count[mid_prof]) { + ipolicer->pfvf_map[mid_prof] = 0x00; + rvu_free_rsrc(&ipolicer->band_prof, mid_prof); + } +} + +int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, + struct nix_bandprof_get_hwinfo_rsp *rsp) +{ + struct nix_ipolicer *ipolicer; + int blkaddr, layer, err; + struct nix_hw *nix_hw; + u64 tu; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + /* Return number of bandwidth profiles free at each layer */ + mutex_lock(&rvu->rsrc_lock); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); + } + mutex_unlock(&rvu->rsrc_lock); + + /* Set the policer timeunit in nanosec */ + tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); + rsp->policer_timeunit = (tu + 1) * 100; + + return 0; +} + +static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp, + u32 mcast_grp_idx) +{ + struct nix_mcast_grp_elem *iter; + bool is_found = false; + + list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) { + if (iter->mcast_grp_idx == mcast_grp_idx) { + is_found = true; + break; + } + } + + if (is_found) + return iter; + + return NULL; +} + +int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr, ret; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + mcast_grp = &nix_hw->mcast_grp; + mutex_lock(&mcast_grp->mcast_grp_lock); + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx); + if (!elem) + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + else + ret = elem->mce_start_index; + + mutex_unlock(&mcast_grp->mcast_grp_lock); + return ret; +} + +void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc) +{ + struct nix_mcast_grp_destroy_req dreq = { 0 }; + struct nix_mcast_grp_update_req ureq = { 0 }; + struct nix_mcast_grp_update_rsp ursp = { 0 }; + struct nix_mcast_grp_elem *elem, *tmp; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return; + + mcast_grp = &nix_hw->mcast_grp; + + mutex_lock(&mcast_grp->mcast_grp_lock); + list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) { + struct nix_mce_list *mce_list; + struct hlist_node *tmp; + struct mce *mce; + + /* If the pcifunc which created the multicast/mirror + * group received an FLR, then delete the entire group. 
+ */ + if (elem->pcifunc == pcifunc) { + /* Delete group */ + dreq.hdr.pcifunc = elem->pcifunc; + dreq.mcast_grp_idx = elem->mcast_grp_idx; + dreq.is_af = 1; + rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL); + continue; + } + + /* Iterate the group elements and delete the element which + * received the FLR. + */ + mce_list = &elem->mcast_mce_list; + hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) { + if (mce->pcifunc == pcifunc) { + ureq.hdr.pcifunc = pcifunc; + ureq.num_mce_entry = 1; + ureq.mcast_grp_idx = elem->mcast_grp_idx; + ureq.op = NIX_MCAST_OP_DEL_ENTRY; + ureq.pcifunc[0] = pcifunc; + ureq.is_af = 1; + rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp); + break; + } + } + } + mutex_unlock(&mcast_grp->mcast_grp_lock); +} + +int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, + u32 mcast_grp_idx, u16 mcam_index) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr, ret = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + mcast_grp = &nix_hw->mcast_grp; + mutex_lock(&mcast_grp->mcast_grp_lock); + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx); + if (!elem) + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + else + elem->mcam_index = mcam_index; + + mutex_unlock(&mcast_grp->mcast_grp_lock); + return ret; +} + +int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu, + struct nix_mcast_grp_create_req *req, + struct nix_mcast_grp_create_rsp *rsp) +{ + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + struct nix_hw *nix_hw; + int blkaddr, err; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast_grp = &nix_hw->mcast_grp; + elem = kzalloc(sizeof(*elem), GFP_KERNEL); + if (!elem) + return -ENOMEM; + + INIT_HLIST_HEAD(&elem->mcast_mce_list.head); + elem->mcam_index = -1; + elem->mce_start_index = -1; + elem->pcifunc = req->hdr.pcifunc; + elem->dir = req->dir; + elem->mcast_grp_idx = mcast_grp->next_grp_index++; + + mutex_lock(&mcast_grp->mcast_grp_lock); + list_add_tail(&elem->list, &mcast_grp->mcast_grp_head); + mcast_grp->count++; + mutex_unlock(&mcast_grp->mcast_grp_lock); + + rsp->mcast_grp_idx = elem->mcast_grp_idx; + return 0; +} + +int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu, + struct nix_mcast_grp_destroy_req *req, + struct msg_rsp *rsp) +{ + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_delete_flow_rsp uninstall_rsp = { 0 }; + struct nix_mcast_grp_elem *elem; + struct nix_mcast_grp *mcast_grp; + int blkaddr, err, ret = 0; + struct nix_mcast *mcast; + struct nix_hw *nix_hw; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + mcast_grp = &nix_hw->mcast_grp; + + /* If AF is requesting for the deletion, + * then AF is already taking the lock + */ + if (!req->is_af) + mutex_lock(&mcast_grp->mcast_grp_lock); + + elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx); + if (!elem) { + ret = NIX_AF_ERR_INVALID_MCAST_GRP; + goto unlock_grp; + } + + /* If no mce entries are associated with the group + * then just remove it from the global list. 
+	 */
+	if (!elem->mcast_mce_list.count)
+		goto delete_grp;
+
+	/* Delete the associated mcam entry and
+	 * remove all mce entries from the group
+	 */
+	mcast = &nix_hw->mcast;
+	mutex_lock(&mcast->mce_lock);
+	if (elem->mcam_index != -1) {
+		uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
+		uninstall_req.entry = elem->mcam_index;
+		rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+	}
+
+	nix_free_mce_list(mcast, elem->mcast_mce_list.count,
+			  elem->mce_start_index, elem->dir);
+	nix_delete_mcast_mce_list(&elem->mcast_mce_list);
+	mutex_unlock(&mcast->mce_lock);
+
+delete_grp:
+	list_del(&elem->list);
+	kfree(elem);
+	mcast_grp->count--;
+
+unlock_grp:
+	if (!req->is_af)
+		mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+	return ret;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
+					  struct nix_mcast_grp_update_req *req,
+					  struct nix_mcast_grp_update_rsp *rsp)
+{
+	struct nix_mcast_grp_destroy_req dreq = { 0 };
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	struct nix_mcast_grp_elem *elem;
+	struct nix_mcast_grp *mcast_grp;
+	int blkaddr, err, npc_blkaddr;
+	u16 prev_count, new_count;
+	struct nix_mcast *mcast;
+	struct nix_hw *nix_hw;
+	int i, ret;
+
+	if (!req->num_mce_entry)
+		return 0;
+
+	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+	if (err)
+		return err;
+
+	mcast_grp = &nix_hw->mcast_grp;
+
+	/* If AF itself is requesting the update,
+	 * then it already holds the lock.
+	 */
+	if (!req->is_af)
+		mutex_lock(&mcast_grp->mcast_grp_lock);
+
+	elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+	if (!elem) {
+		ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+		goto unlock_grp;
+	}
+
+	/* If any requested pcifunc matches the pcifunc that created
+	 * the group, then delete the entire group.
+	 */
+	if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
+		for (i = 0; i < req->num_mce_entry; i++) {
+			if (elem->pcifunc == req->pcifunc[i]) {
+				/* Delete group */
+				dreq.hdr.pcifunc = elem->pcifunc;
+				dreq.mcast_grp_idx = elem->mcast_grp_idx;
+				dreq.is_af = 1;
+				rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+				ret = 0;
+				goto unlock_grp;
+			}
+		}
+	}
+
+	mcast = &nix_hw->mcast;
+	mutex_lock(&mcast->mce_lock);
+	npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (elem->mcam_index != -1)
+		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
+
+	prev_count = elem->mcast_mce_list.count;
+	if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
+		new_count = prev_count + req->num_mce_entry;
+		if (prev_count)
+			nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+
+		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+
+		/* It is possible not to get contiguous memory */
+		if (elem->mce_start_index < 0) {
+			if (elem->mcam_index != -1) {
+				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+						      elem->mcam_index, true);
+				ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
+				goto unlock_mce;
+			}
+		}
+
+		ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
+		if (ret) {
+			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+			if (prev_count)
+				elem->mce_start_index = nix_alloc_mce_list(mcast,
+									   prev_count,
+									   elem->dir);
+
+			if (elem->mcam_index != -1)
+				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+						      elem->mcam_index, true);
+
+			goto unlock_mce;
+		}
+	} else {
+		if (!prev_count || prev_count < req->num_mce_entry) {
+			if (elem->mcam_index != -1)
+				npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+						      elem->mcam_index, true);
+			ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+			goto unlock_mce;
+		}
+
+		nix_free_mce_list(mcast, prev_count,
+				  elem->mce_start_index, elem->dir);
+		new_count = prev_count - req->num_mce_entry;
+		elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+		ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
+		if (ret) {
+			nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+			elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
+			if (elem->mcam_index != -1)
+				npc_enable_mcam_entry(rvu, mcam,
+						      npc_blkaddr,
+						      elem->mcam_index,
+						      true);
+
+			goto unlock_mce;
+		}
+	}
+
+	if (elem->mcam_index == -1) {
+		rsp->mce_start_index = elem->mce_start_index;
+		ret = 0;
+		goto unlock_mce;
+	}
+
+	nix_mcast_update_action(rvu, elem);
+	npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
+	rsp->mce_start_index = elem->mce_start_index;
+	ret = 0;
+
+unlock_mce:
+	mutex_unlock(&mcast->mce_lock);
+
+unlock_grp:
+	if (!req->is_af)
+		mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+	return ret;
+}
+
+/* On CN10K and older silicon variants, hardware may incorrectly
+ * assert XOFF on certain channels. Issue a write to NIX_AF_RX_CHANX_CFG
+ * to broadcast XON on the same.
+ */
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr)
+{
+	struct rvu_block *block = &rvu->hw->block[blkaddr];
+	u64 cfg;
+
+	if (!block->implemented || is_cn20k(rvu->pdev))
+		return;
+
+	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0));
+	rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg);
+}
