Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_ppe.c')
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c  245
1 file changed, 212 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 269208a841c7..ada852adc5f7 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -8,7 +8,11 @@
 #include <linux/platform_device.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+
+#include <net/dst_metadata.h>
 #include <net/dsa.h>
+#include <net/ipv6.h>
+
 #include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"
@@ -74,6 +78,55 @@ static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 	return ret;
 }
 
+static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
+{
+	int ret;
+	u32 val;
+
+	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
+				 !(val & MTK_PPE_MIB_SER_CR_ST),
+				 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+	if (ret)
+		dev_err(ppe->dev, "MIB table busy");
+
+	return ret;
+}
+
+static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
+{
+	u32 val, cnt_r0, cnt_r1, cnt_r2;
+	int ret;
+
+	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
+	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
+
+	ret = mtk_ppe_mib_wait_busy(ppe);
+	if (ret)
+		return ret;
+
+	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
+	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
+	cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
+
+	if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
+		/* 64 bit for each counter */
+		u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
+		*bytes = ((u64)cnt_r1 << 32) | cnt_r0;
+		*packets = ((u64)cnt_r3 << 32) | cnt_r2;
+	} else {
+		/* 48 bit byte counter, 40 bit packet counter */
+		u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
+		u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+		u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+		u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+		*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+		*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
+	}
+
+	return 0;
+}
+
 static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
 {
 	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
@@ -165,7 +218,7 @@ int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 
 	memset(entry, 0, sizeof(*entry));
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+	if (mtk_is_netsys_v2_or_greater(eth)) {
 		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
 		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
 		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
@@ -229,7 +282,7 @@ int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 	u32 val = *ib2;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+	if (mtk_is_netsys_v2_or_greater(eth)) {
 		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
 		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
 	} else {
@@ -288,7 +341,6 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
 {
 	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
 	u32 *src, *dest;
-	int i;
 
 	switch (type) {
 	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
@@ -309,10 +361,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
 		return -EINVAL;
 	}
 
-	for (i = 0; i < 4; i++)
-		src[i] = be32_to_cpu(src_addr[i]);
-	for (i = 0; i < 4; i++)
-		dest[i] = be32_to_cpu(dest_addr[i]);
+	ipv6_addr_be32_to_cpu(src, src_addr);
+	ipv6_addr_be32_to_cpu(dest, dest_addr);
 
 	return 0;
 }
@@ -375,18 +425,29 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 }
 
 int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
-			   int wdma_idx, int txq, int bss, int wcid)
+			   int wdma_idx, int txq, int bss, int wcid,
+			   bool amsdu_en)
 {
 	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+	switch (eth->soc->version) {
+	case 3:
+		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+			MTK_FOE_IB2_WDMA_WINFO_V2;
+		l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
+			     FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
+		l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
+		break;
+	case 2:
 		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
 		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
 			MTK_FOE_IB2_WDMA_WINFO_V2;
 		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
 			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
-	} else {
+		break;
+	default:
 		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
 		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
 		if (wdma_idx)
@@ -394,6 +455,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
 			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
 			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+		break;
 	}
 
 	return 0;
@@ -404,7 +466,7 @@ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
 {
 	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+	if (mtk_is_netsys_v2_or_greater(eth)) {
 		*ib2 &= ~MTK_FOE_IB2_QID_V2;
 		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
 		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
@@ -458,6 +520,15 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
 		dma_wmb();
+		mtk_ppe_cache_clear(ppe);
+
+		if (ppe->accounting) {
+			struct mtk_foe_accounting *acct;
+
+			acct = ppe->acct_table + entry->hash * sizeof(*acct);
+			acct->packets = 0;
+			acct->bytes = 0;
+		}
 	}
 
 	entry->hash = 0xffff;
@@ -509,7 +580,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 
 		idle = cur_idle;
 		entry->data.ib1 &= ~ib1_ts_mask;
-		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+		entry->data.ib1 |= ib1 & ib1_ts_mask;
 	}
 }
 
@@ -549,8 +620,9 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 	struct mtk_eth *eth = ppe->eth;
 	u16 timestamp = mtk_eth_timestamp(eth);
 	struct mtk_foe_entry *hwe;
+	u32 val;
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+	if (mtk_is_netsys_v2_or_greater(eth)) {
 		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
 		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
 					 timestamp);
@@ -565,6 +637,14 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 	wmb();
 	hwe->ib1 = entry->ib1;
 
+	if (ppe->accounting) {
+		if (mtk_is_netsys_v2_or_greater(eth))
+			val = MTK_FOE_IB2_MIB_CNT_V2;
+		else
+			val = MTK_FOE_IB2_MIB_CNT;
+		*mtk_foe_entry_ib2(eth, hwe) |= val;
+	}
+
 	dma_wmb();
 
 	mtk_ppe_cache_clear(ppe);
@@ -580,10 +660,20 @@ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 static int
 mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+	struct mtk_flow_entry *prev;
+
 	entry->type = MTK_FLOW_TYPE_L2;
 
-	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
-				      mtk_flow_l2_ht_params);
+	prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
+						 mtk_flow_l2_ht_params);
+	if (likely(!prev))
+		return 0;
+
+	if (IS_ERR(prev))
+		return PTR_ERR(prev);
+
+	return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+				       &entry->l2_node, mtk_flow_l2_ht_params);
 }
 
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
@@ -615,8 +705,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
 	int type;
 
-	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
-			    GFP_ATOMIC);
+	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
 	if (!flow_info)
 		return;
 
@@ -700,7 +789,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
 			goto out;
 
-		tag += 4;
+		if (!skb_metadata_dst(skb))
+			tag += 4;
+
 		if (get_unaligned_be16(tag) != ETH_P_8021Q)
 			break;
 
@@ -730,11 +821,66 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
 }
 
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
-			     int version, int index)
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+{
+	if (!ppe)
+		return -EINVAL;
+
+	/* disable KA */
+	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
+	usleep_range(10000, 11000);
+
+	/* set KA timer to maximum */
+	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
+
+	/* set KA tick select */
+	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
+	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+	usleep_range(10000, 11000);
+
+	/* disable scan mode */
+	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
+	usleep_range(10000, 11000);
+
+	return mtk_ppe_wait_busy(ppe);
+}
+
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+						 struct mtk_foe_accounting *diff)
 {
+	struct mtk_foe_accounting *acct;
+	int size = sizeof(struct mtk_foe_accounting);
+	u64 bytes, packets;
+
+	if (!ppe->accounting)
+		return NULL;
+
+	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
+		return NULL;
+
+	acct = ppe->acct_table + index * size;
+
+	acct->bytes += bytes;
+	acct->packets += packets;
+
+	if (diff) {
+		diff->bytes = bytes;
+		diff->packets = packets;
+	}
+
+	return acct;
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
+{
+	bool accounting = eth->soc->has_accounting;
 	const struct mtk_soc_data *soc = eth->soc;
+	struct mtk_foe_accounting *acct;
 	struct device *dev = eth->dev;
+	struct mtk_mib_entry *mib;
 	struct mtk_ppe *ppe;
 	u32 foe_flow_size;
 	void *foe;
@@ -751,7 +897,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 	ppe->base = base;
 	ppe->eth = eth;
 	ppe->dev = dev;
-	ppe->version = version;
+	ppe->version = eth->soc->offload_version;
+	ppe->accounting = accounting;
 
 	foe = dmam_alloc_coherent(ppe->dev,
 				  MTK_PPE_ENTRIES * soc->foe_entry_size,
@@ -767,6 +914,23 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 	if (!ppe->foe_flow)
 		goto err_free_l2_flows;
 
+	if (accounting) {
+		mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
+					  &ppe->mib_phys, GFP_KERNEL);
+		if (!mib)
+			return NULL;
+
+		ppe->mib_table = mib;
+
+		acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
+				    GFP_KERNEL);
+
+		if (!acct)
+			return NULL;
+
+		ppe->acct_table = acct;
+	}
+
 	mtk_ppe_debugfs_init(ppe, index);
 
 	return ppe;
@@ -819,8 +983,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
 	mtk_ppe_init_foe_table(ppe);
 	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
 
-	val = MTK_PPE_TB_CFG_ENTRY_80B |
-	      MTK_PPE_TB_CFG_AGE_NON_L4 |
+	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
 	      MTK_PPE_TB_CFG_AGE_UNBIND |
 	      MTK_PPE_TB_CFG_AGE_TCP |
 	      MTK_PPE_TB_CFG_AGE_UDP |
@@ -831,11 +994,13 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
 			      MTK_PPE_KEEPALIVE_DISABLE) |
 	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
 	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
-			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+			 MTK_PPE_SCAN_MODE_CHECK_AGE) |
 	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
 			 MTK_PPE_ENTRIES_SHIFT);
-	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+	if (mtk_is_netsys_v2_or_greater(ppe->eth))
 		val |= MTK_PPE_TB_CFG_INFO_SEL;
+	if (!mtk_is_netsys_v3_or_greater(ppe->eth))
+		val |= MTK_PPE_TB_CFG_ENTRY_80B;
 	ppe_w32(ppe, MTK_PPE_TB_CFG, val);
 
 	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -850,7 +1015,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
 	      MTK_PPE_FLOW_CFG_IP4_NAPT |
 	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
 	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
-	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+	if (mtk_is_netsys_v2_or_greater(ppe->eth))
 		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
 		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
 		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
@@ -892,10 +1057,20 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
 
 	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
 
-	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+	if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
 		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
 		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
 	}
+
+	if (ppe->accounting && ppe->mib_phys) {
+		ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
+		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
+			MTK_PPE_MIB_CFG_EN);
+		ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
+			MTK_PPE_MIB_CFG_RD_CLR);
+		ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
+			MTK_PPE_MIB_CFG_RD_CLR);
+	}
 }
 
 int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -915,17 +1090,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
 
 	mtk_ppe_cache_enable(ppe, false);
 
-	/* disable offload engine */
-	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
-	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
-
 	/* disable aging */
 	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
 	      MTK_PPE_TB_CFG_AGE_UNBIND |
 	      MTK_PPE_TB_CFG_AGE_TCP |
 	      MTK_PPE_TB_CFG_AGE_UDP |
-	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
+	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
+	      MTK_PPE_TB_CFG_SCAN_MODE;
 	ppe_clear(ppe, MTK_PPE_TB_CFG, val);
 
-	return mtk_ppe_wait_busy(ppe);
+	if (mtk_ppe_wait_busy(ppe))
+		return -ETIMEDOUT;
+
+	/* disable offload engine */
+	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+
+	return 0;
+}
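For reference, the subtlest arithmetic in this change is the pre-NETSYS-v3 branch of mtk_mib_entry_read(), which splices a 48-bit byte counter and a 40-bit packet counter out of three 32-bit registers. The following stand-alone user-space sketch mirrors that splicing; it is illustrative only, and the bit layout noted in the comments is an assumption based on the MTK_PPE_MIB_SER_R0..R2 field definitions in mtk_ppe_regs.h (the kernel code uses FIELD_GET with those masks rather than open-coded shifts).

/*
 * Stand-alone sketch (user space, not kernel code) of the pre-NETSYS-v3
 * counter splicing done by mtk_mib_entry_read().  Assumed field layout:
 *
 *   R0[31:0]  = byte count, low 32 bits
 *   R1[15:0]  = byte count, high 16 bits   -> 48-bit byte counter
 *   R1[31:16] = packet count, low 16 bits
 *   R2[23:0]  = packet count, high 24 bits -> 40-bit packet counter
 */
#include <stdint.h>
#include <stdio.h>

static void mib_splice(uint32_t r0, uint32_t r1, uint32_t r2,
		       uint64_t *bytes, uint64_t *packets)
{
	uint32_t byte_cnt_low  = r0;		/* FIELD_GET(GENMASK(31, 0), r0) */
	uint32_t byte_cnt_high = r1 & 0xffff;	/* FIELD_GET(GENMASK(15, 0), r1) */
	uint32_t pkt_cnt_low   = r1 >> 16;	/* FIELD_GET(GENMASK(31, 16), r1) */
	uint32_t pkt_cnt_high  = r2 & 0xffffff;	/* FIELD_GET(GENMASK(23, 0), r2) */

	/* Mirror the kernel expressions: each high half is shifted past the
	 * width of its low half (32 bits of bytes, 16 bits of packets). */
	*bytes   = ((uint64_t)byte_cnt_high << 32) | byte_cnt_low;
	*packets = ((uint64_t)pkt_cnt_high << 16) | pkt_cnt_low;
}

int main(void)
{
	uint64_t bytes, packets;

	/* Made-up register snapshot encoding bytes = 0x123489abcdef and
	 * packets = 0xbe0042. */
	mib_splice(0x89abcdef, (0x0042u << 16) | 0x1234, 0xbe,
		   &bytes, &packets);
	printf("bytes=0x%llx packets=0x%llx\n",
	       (unsigned long long)bytes, (unsigned long long)packets);
	return 0;
}

On NETSYS v3 and later no splicing is needed: the same helper reads a fourth register and treats each counter as a plain 64-bit value (R0/R1 for bytes, R2/R3 for packets).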
