Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_eth_soc.h')
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1083
1 file changed, 830 insertions(+), 253 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 454cfcd465fd..0168e2fbc619 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -15,14 +15,27 @@
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/phylink.h>
+#include <linux/rhashtable.h>
+#include <linux/dim.h>
+#include <linux/bitfield.h>
+#include <net/page_pool/types.h>
+#include <linux/bpf_trace.h>
+#include "mtk_ppe.h"
+#define MTK_MAX_DSA_PORTS 7
+#define MTK_DSA_PORT_MASK GENMASK(2, 0)
+
+#define MTK_QDMA_NUM_QUEUES 16
#define MTK_QDMA_PAGE_SIZE 2048
-#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
-#define MTK_DMA_SIZE 256
-#define MTK_NAPI_WEIGHT 64
-#define MTK_MAC_COUNT 2
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
+#define MTK_QDMA_RING_SIZE 2048
+#define MTK_DMA_SIZE(x) (SZ_##x)
+#define MTK_FQ_DMA_HEAD 32
+#define MTK_FQ_DMA_LENGTH 2048
+#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -36,13 +49,20 @@
#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_TX | \
- NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
- NETIF_F_IPV6_CSUM)
+ NETIF_F_IPV6_CSUM |\
+ NETIF_F_HW_TC)
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
+#define MTK_QRX_OFFSET 0x10
+
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
@@ -57,12 +77,23 @@
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG(x) (((x) == MTK_GMAC3_ID) ? 0x24 : 0x00)
+#define MTK_FE_LINK_DOWN_P(x) BIT(((x) + 8) % 16)
+
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
#define RST_GL_PSE BIT(0)
/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2 0x08
+#define MTK_FE_INT_ENABLE 0x0c
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
@@ -76,42 +107,86 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMQ Egress Control Register */
+#define MTK_CDMQ_EG_CTRL 0x1404
+
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
/* GDM Egress Control Register */
-#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
+#define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x540 : 0x500 + (_x * 0x1000); })
+#define MTK_GDMA_SPECIAL_TAG BIT(24)
#define MTK_GDMA_ICS_EN BIT(22)
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_STRP_CRC BIT(16)
#define MTK_GDMA_TO_PDMA 0x0
#define MTK_GDMA_DROP_ALL 0x7777
+/* GDM Egress Control Register */
+#define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x544 : 0x504 + (_x * 0x1000); })
+#define MTK_GDMA_XGDM_SEL BIT(31)
+
/* Unicast Filter MAC Address Register - Low */
-#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
+#define MTK_GDMA_MAC_ADRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x548 : 0x508 + (_x * 0x1000); })
/* Unicast Filter MAC Address Register - High */
-#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x54C : 0x50C + (_x * 0x1000); })
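Because a third GMAC does not follow the 0x1000-per-port stride, the forwarding, egress-control and MAC-address helpers above special-case MTK_GMAC3_ID. For reference, a minimal sketch of the expected expansion, using the mtk_gmac_id enum added further down in this header:

    MTK_GDMA_FWD_CFG(MTK_GMAC1_ID);   /* 0x500 + 0 * 0x1000 = 0x0500 */
    MTK_GDMA_FWD_CFG(MTK_GMAC2_ID);   /* 0x500 + 1 * 0x1000 = 0x1500 */
    MTK_GDMA_FWD_CFG(MTK_GMAC3_ID);   /* special-cased to 0x0540 */
    MTK_GDMA_MAC_ADRL(MTK_GMAC3_ID);  /* special-cased to 0x0548 */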
+
+/* legacy DT support for internal SRAM */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+#define MTK_ETH_SRAM_GRANULARITY 32
+#define MTK_ETH_NETSYS_V2_SRAM_SIZE 0x40000
+
+/* FE global misc reg */
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo but actually called like that also in
+ * MediaTek's datasheet
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
-/* PDMA RX Base Pointer Register */
-#define MTK_PRX_BASE_PTR0 0x900
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+/* PSE Input Queue Reservation Register */
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
-/* PDMA RX Maximum Count Register */
-#define MTK_PRX_MAX_CNT0 0x904
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+/* PSE Output Queue Threshold Register */
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
-/* PDMA RX CPU Pointer Register */
-#define MTK_PRX_CRX_IDX0 0x908
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0 0x980
#define MTK_LRO_EN BIT(0)
#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
#define MTK_PDMA_LRO_CTRL_DW1 0x984
#define MTK_PDMA_LRO_CTRL_DW2 0x988
@@ -119,39 +194,35 @@
#define MTK_ADMA_MODE BIT(15)
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
-/* PDMA Global Configuration Register */
-#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_RX_DMA_LRO_EN BIT(8)
#define MTK_MULTI_EN BIT(10)
#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
+
/* PDMA Reset Index Register */
-#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
+#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
-#define MTK_PDMA_DELAY_RX_PINT 4
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
-#define MTK_PDMA_DELAY_RX_PTIME 4
-#define MTK_PDMA_DELAY_RX_DELAY \
- (MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
- (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
+#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
-/* PDMA Interrupt Status Register */
-#define MTK_PDMA_INT_STATUS 0xa20
+#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
+#define MTK_PDMA_DELAY_TX_EN BIT(31)
+#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
+#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
-/* PDMA Interrupt Mask Register */
-#define MTK_PDMA_INT_MASK 0xa28
+#define MTK_PDMA_DELAY_PINT_MASK 0x7f
+#define MTK_PDMA_DELAY_PTIME_MASK 0xff
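The fixed MTK_PDMA_DELAY_RX_DELAY value is gone; with the RX/TX enable bits, shifts and masks above, the delay-interrupt word can now be rebuilt at runtime (e.g. from Net DIM). A rough sketch reproducing the old static setting of 4 pending packets / 4 time units:

    u32 rx_delay = MTK_PDMA_DELAY_RX_EN |
                   (4 << MTK_PDMA_DELAY_RX_PINT_SHIFT) |  /* pending-packet count */
                   (4 << MTK_PDMA_DELAY_RX_PTIME_SHIFT);  /* delay time units */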
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
-/* PDMA Interrupt grouping registers */
-#define MTK_PDMA_INT_GRP1 0xa50
-#define MTK_PDMA_INT_GRP2 0xa54
-
/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
@@ -173,52 +244,54 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
/* QDMA TX Queue Configuration Registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
+#define MTK_QTX_OFFSET 0x10
#define QDMA_RES_THRES 4
-/* QDMA TX Queue Scheduler Registers */
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-
-/* QDMA RX Base Pointer Register */
-#define MTK_QRX_BASE_PTR0 0x1900
+/* QDMA Tx Queue Scheduler Configuration Registers */
+#define MTK_QTX_SCH_TX_SEL BIT(31)
+#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
-/* QDMA RX Maximum Count Register */
-#define MTK_QRX_MAX_CNT0 0x1904
+#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
+#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
+#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
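The MIN/MAX rate fields appear to encode a rate limit as mantissa x 10^exponent, alongside an enable bit and a WFQ weight. A hedged sketch of composing a max-rate value with the bitfield helpers; the concrete values (10 and 5, roughly 1 Gbit/s if the unit is kbit/s) are illustrative:

    u32 sch = MTK_QTX_SCH_MAX_RATE_EN |
              FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
              FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
              FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 4);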
-/* QDMA RX CPU Pointer Register */
-#define MTK_QRX_CRX_IDX0 0x1908
-
-/* QDMA RX DMA Pointer Register */
-#define MTK_QRX_DRX_IDX0 0x190C
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
/* QDMA Global Configuration Register */
-#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
-#define MTK_DMA_SIZE_16DWORDS (2 << 4)
+#define MTK_TX_BT_32DWORDS (3 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
#define MTK_TX_DMA_BUSY BIT(1)
#define MTK_RX_DMA_EN BIT(2)
#define MTK_TX_DMA_EN BIT(0)
-#define MTK_DMA_BUSY_TIMEOUT HZ
-
-/* QDMA Reset Index Register */
-#define MTK_QDMA_RST_IDX 0x1A08
+#define MTK_DMA_BUSY_TIMEOUT_US 1000000
-/* QDMA Delay Interrupt Register */
-#define MTK_QDMA_DELAY_INT 0x1A0C
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
+#define MTK_LEAKY_BUCKET_EN BIT(11)
/* QDMA Flow Control Register */
-#define MTK_QDMA_FC_THRES 0x1A10
#define FC_THRES_DROP_MODE BIT(20)
#define FC_THRES_DROP_EN (7 << 16)
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
+#define MTK_TX_DONE_DLY BIT(28)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
#define MTK_RX_DONE_INT1 BIT(17)
@@ -228,47 +301,33 @@
#define MTK_TX_DONE_INT1 BIT(1)
#define MTK_TX_DONE_INT0 BIT(0)
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-/* QDMA Interrupt grouping registers */
-#define MTK_QDMA_INT_GRP1 0x1a20
-#define MTK_QDMA_INT_GRP2 0x1a24
-#define MTK_RLS_DONE_INT BIT(0)
-
-/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_MASK 0x1A1C
+#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
-/* QDMA Interrupt Mask Register */
-#define MTK_QDMA_HRED2 0x1A44
+#define MTK_RX_DONE_INT_V2 BIT(14)
-/* QDMA TX Forward CPU Pointer Register */
-#define MTK_QTX_CTX_PTR 0x1B00
+#define MTK_CDM_TXFIFO_RDY BIT(7)
-/* QDMA TX Forward DMA Pointer Register */
-#define MTK_QTX_DTX_PTR 0x1B04
-
-/* QDMA TX Release CPU Pointer Register */
-#define MTK_QTX_CRX_PTR 0x1B10
-
-/* QDMA TX Release DMA Pointer Register */
-#define MTK_QTX_DRX_PTR 0x1B14
+/* QDMA Interrupt grouping registers */
+#define MTK_RLS_DONE_INT BIT(0)
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_HEAD 0x1B20
+/* QDMA TX NUM */
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+#define MTK_QDMA_GMAC2_QID 8
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_TAIL 0x1B24
+#define MTK_TX_DMA_BUF_SHIFT 8
-/* QDMA FQ Free Page Counter Register */
-#define MTK_QDMA_FQ_CNT 0x1B28
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
-/* QDMA FQ Free Page Buffer Length Register */
-#define MTK_QDMA_FQ_BLEN 0x1B2C
+#define TX_DMA_SPTAG_V3 BIT(27)
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT 0x2400
-#define MTK_STAT_OFFSET 0x40
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
@@ -280,10 +339,18 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
+#define TX_DMA_PQID GENMASK(3, 0)
+#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
+#if IS_ENABLED(CONFIG_64BIT)
+# define TX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32)
+# define TX_DMA_PREP_ADDR64(x) FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32))
+#else
+# define TX_DMA_GET_ADDR64(x) (0)
+# define TX_DMA_PREP_ADDR64(x) (0)
+#endif
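On SoCs with 36-bit DMA the descriptor address word only holds the low 32 bits; the extra bits travel in the 4-bit ADDR64 field, and the helpers above compile to 0 on 32-bit builds. Note also that TX_DMA_PLEN0()/TX_DMA_PLEN1() now dereference a local struct mtk_eth *eth, so they can only be used where one is in scope. A small sketch of splitting and recombining an address, assuming CONFIG_64BIT:

    dma_addr_t addr = 0x3fff00000ULL;               /* example 36-bit bus address */
    u32 lo  = lower_32_bits(addr);                  /* low word stored in the descriptor */
    u32 ext = TX_DMA_PREP_ADDR64(addr);             /* FIELD_PREP(GENMASK(3, 0), addr >> 32) == 0x3 */
    dma_addr_t back = lo | TX_DMA_GET_ADDR64(ext);  /* 0x3fff00000 again */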
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -293,40 +360,112 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+#define RX_DMA_VTAG BIT(15)
+#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
+#if IS_ENABLED(CONFIG_64BIT)
+# define RX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32)
+# define RX_DMA_PREP_ADDR64(x) FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32))
+#else
+# define RX_DMA_GET_ADDR64(x) (0)
+# define RX_DMA_PREP_ADDR64(x) (0)
+#endif
/* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
+
+/* QDMA descriptor rxd4 */
+#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
+#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
+#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
+#define MTK_RXD4_ALG GENMASK(31, 22)
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
+#define RX_DMA_SPECIAL_TAG BIT(22)
+
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
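RX_DMA_GET_SPORT()/RX_DMA_GET_SPORT_V2() are the open-coded equivalents of FIELD_GET() on the rxd4/rxd5 source-port masks above; e.g., with rxd4/rxd5 being descriptor words read back from the RX ring:

    u32 sport    = RX_DMA_GET_SPORT(rxd4);     /* == FIELD_GET(MTK_RXD4_SRC_PORT, rxd4) */
    u32 sport_v2 = RX_DMA_GET_SPORT_V2(rxd5);  /* == FIELD_GET(MTK_RXD5_SRC_PORT, rxd5) */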
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
+/* PHY Polling and SMI Master Control registers */
+#define MTK_PPSC 0x10000
+#define PPSC_MDC_CFG GENMASK(29, 24)
+#define PPSC_MDC_TURBO BIT(20)
+#define MDC_MAX_FREQ 25000000
+#define MDC_MAX_DIVIDER 63
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
-#define PHY_IAC_READ BIT(19)
-#define PHY_IAC_WRITE BIT(18)
-#define PHY_IAC_START BIT(16)
-#define PHY_IAC_ADDR_SHIFT 20
-#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_REG_MASK GENMASK(29, 25)
+#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x))
+#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
+#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
+#define PHY_IAC_CMD_MASK GENMASK(19, 18)
+#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0)
+#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1)
+#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2)
+#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3)
+#define PHY_IAC_START_MASK GENMASK(17, 16)
+#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0)
+#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1)
+#define PHY_IAC_DATA_MASK GENMASK(15, 0)
+#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT HZ
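The old shift-based PHY_IAC encoding is replaced by named command/start/address/register/data fields. A hedged sketch of kicking off a clause-22 read with these definitions (phy_addr and phy_reg are illustrative locals; mtk_w32() is declared at the bottom of this header):

    mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 | PHY_IAC_CMD_C22_READ |
            PHY_IAC_ADDR(phy_addr) | PHY_IAC_REG(phy_reg),
            MTK_PHY_IAC);
    /* then poll until PHY_IAC_ACCESS clears; the result sits in PHY_IAC_DATA_MASK */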
#define MTK_MAC_MISC 0x1000c
+#define MTK_MAC_MISC_V3 0x10010
#define MTK_MUX_TO_ESW BIT(0)
+#define MISC_MDC_TURBO BIT(4)
+
+/* XMAC status registers */
+#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
+#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(27) : BIT(11))
+#define MTK_USXGMII_PCS_LINK BIT(8)
+#define MTK_XGMAC_RX_FC BIT(5)
+#define MTK_XGMAC_TX_FC BIT(4)
+#define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
+#define MTK_XGMAC_LINK_STS BIT(0)
+
+/* GSW bridge registers */
+#define MTK_GSW_CFG (0x10080)
+#define GSWTX_IPG_MASK GENMASK(19, 16)
+#define GSWTX_IPG_SHIFT 16
+#define GSWRX_IPG_MASK GENMASK(3, 0)
+#define GSWRX_IPG_SHIFT 0
+#define GSW_IPG_11 11
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
-#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24)
+#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24))
+#define MAC_MCR_MAX_RX_1518 0x0
+#define MAC_MCR_MAX_RX_1536 0x1
+#define MAC_MCR_MAX_RX_1552 0x2
+#define MAC_MCR_MAX_RX_2048 0x3
#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -335,6 +474,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
@@ -379,6 +527,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
+/* XFI Mac control registers */
+#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000))
+#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x))
+#define XMAC_MCR_TRX_DISABLE 0xf
+#define XMAC_MCR_FORCE_TX_FC BIT(5)
+#define XMAC_MCR_FORCE_RX_FC BIT(4)
+
+/* XFI Mac logic reset registers */
+#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
+#define XMAC_LOGIC_RST BIT(0)
+
+/* XFI Mac count global control */
+#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
+#define XMAC_GLB_CNTCLR BIT(0)
+
/* GPIO port control registers for GMAC 2 */
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -399,7 +562,7 @@
#define ETHSYS_SYSCFG0 0x14
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
-#define SYSCFG0_SGMII_MASK GENMASK(9, 8)
+#define SYSCFG0_SGMII_MASK GENMASK(9, 7)
#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
@@ -414,56 +577,42 @@
#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
/* ethernet reset control register */
-#define ETHSYS_RSTCTRL 0x34
-#define RSTCTRL_FE BIT(6)
-#define RSTCTRL_PPE BIT(31)
-
-/* SGMII subsystem config registers */
-/* Register to auto-negotiation restart */
-#define SGMSYS_PCS_CONTROL_1 0x0
-#define SGMII_AN_RESTART BIT(9)
-#define SGMII_ISOLATE BIT(10)
-#define SGMII_AN_ENABLE BIT(12)
-#define SGMII_LINK_STATYS BIT(18)
-#define SGMII_AN_ABILITY BIT(19)
-#define SGMII_AN_COMPLETE BIT(21)
-#define SGMII_PCS_FAULT BIT(23)
-#define SGMII_AN_EXPANSION_CLR BIT(30)
-
-/* Register to programmable link timer, the unit in 2 * 8ns */
-#define SGMSYS_PCS_LINK_TIMER 0x18
-#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
-
-/* Register to control remote fault */
-#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_IF_MODE_BIT0 BIT(0)
-#define SGMII_SPEED_DUPLEX_AN BIT(1)
-#define SGMII_SPEED_10 0x0
-#define SGMII_SPEED_100 BIT(2)
-#define SGMII_SPEED_1000 BIT(3)
-#define SGMII_DUPLEX_FULL BIT(4)
-#define SGMII_IF_MODE_BIT5 BIT(5)
-#define SGMII_REMOTE_FAULT_DIS BIT(8)
-#define SGMII_CODE_SYNC_SET_VAL BIT(9)
-#define SGMII_CODE_SYNC_SET_EN BIT(10)
-#define SGMII_SEND_AN_ERROR_EN BIT(11)
-#define SGMII_IF_MODE_MASK GENMASK(5, 1)
-
-/* Register to set SGMII speed, ANA RG_ Control Signals III*/
-#define SGMSYS_ANA_RG_CS3 0x2028
-#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
-#define RG_PHY_SPEED_1_25G 0x0
-#define RG_PHY_SPEED_3_125G BIT(2)
-
-/* Register to power up QPHY */
-#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
-#define SGMII_PHYA_PWD BIT(4)
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_WDMA0 BIT(24)
+#define RSTCTRL_WDMA1 BIT(25)
+#define RSTCTRL_WDMA2 BIT(26)
+#define RSTCTRL_PPE0 BIT(31)
+#define RSTCTRL_PPE0_V2 BIT(30)
+#define RSTCTRL_PPE1 BIT(31)
+#define RSTCTRL_PPE0_V3 BIT(29)
+#define RSTCTRL_PPE1_V3 BIT(30)
+#define RSTCTRL_PPE2 BIT(31)
+#define RSTCTRL_ETH BIT(23)
+
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
/* Infrasys subsystem config registers */
#define INFRA_MISC2 0x70c
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
+/* Top misc registers */
+#define TOP_MISC_NETSYS_PCS_MUX 0x0
+#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
+#define MUX_G2_USXGMII_SEL BIT(1)
+
+#define USB_PHY_SWITCH_REG 0x218
+#define QPHY_SEL_MASK GENMASK(1, 0)
+#define SGMII_QPHY_SEL 0x2
+
/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET 0x0800
#define MT7628_SDM_OFFSET 0x0c00
@@ -477,6 +626,29 @@
#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_CDM3_FSM 0x238
+#define MTK_FE_CDM4_FSM 0x298
+#define MTK_FE_CDM5_FSM 0x318
+#define MTK_FE_CDM6_FSM 0x328
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+
+#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+
+#define MTK_FE_IRQ_SHARED 0
+#define MTK_FE_IRQ_TX 0
+#define MTK_FE_IRQ_RX 1
+#define MTK_FE_IRQ_NUM (MTK_FE_IRQ_RX + 1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -484,6 +656,17 @@ struct mtk_rx_dma {
unsigned int rxd4;
} __packed __aligned(4);
+struct mtk_rx_dma_v2 {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+} __packed __aligned(4);
+
struct mtk_tx_dma {
unsigned int txd1;
unsigned int txd2;
@@ -491,9 +674,30 @@ struct mtk_tx_dma {
unsigned int txd4;
} __packed __aligned(4);
+struct mtk_tx_dma_v2 {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+} __packed __aligned(4);
+
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -517,6 +721,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
@@ -528,12 +734,6 @@ enum mtk_tx_flags {
*/
MTK_TX_FLAGS_SINGLE0 = 0x01,
MTK_TX_FLAGS_PAGE0 = 0x02,
-
- /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
- * SKB out instead of looking up through hardware TX descriptor.
- */
- MTK_TX_FLAGS_FPORT0 = 0x04,
- MTK_TX_FLAGS_FPORT1 = 0x08,
};
/* This enum allows us to identify how the clock is defined on the array of the
@@ -546,6 +746,11 @@ enum mtk_clks_map {
MTK_CLK_GP0,
MTK_CLK_GP1,
MTK_CLK_GP2,
+ MTK_CLK_GP3,
+ MTK_CLK_XGP1,
+ MTK_CLK_XGP2,
+ MTK_CLK_XGP3,
+ MTK_CLK_CRYPTO,
MTK_CLK_FE,
MTK_CLK_TRGPLL,
MTK_CLK_SGMII_TX_250M,
@@ -558,42 +763,141 @@ enum mtk_clks_map {
MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_NETSYS0,
+ MTK_CLK_NETSYS1,
+ MTK_CLK_ETHWARP_WOCPU2,
+ MTK_CLK_ETHWARP_WOCPU1,
+ MTK_CLK_ETHWARP_WOCPU0,
+ MTK_CLK_TOP_SGM_0_SEL,
+ MTK_CLK_TOP_SGM_1_SEL,
+ MTK_CLK_TOP_ETH_GMII_SEL,
+ MTK_CLK_TOP_ETH_REFCK_50M_SEL,
+ MTK_CLK_TOP_ETH_SYS_200M_SEL,
+ MTK_CLK_TOP_ETH_SYS_SEL,
+ MTK_CLK_TOP_ETH_XGMII_SEL,
+ MTK_CLK_TOP_ETH_MII_SEL,
+ MTK_CLK_TOP_NETSYS_SEL,
+ MTK_CLK_TOP_NETSYS_500M_SEL,
+ MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
+ MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
+ MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
+ MTK_CLK_TOP_NETSYS_WARP_SEL,
MTK_CLK_MAX
};
-#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \
- BIT(MTK_CLK_TRGPLL))
-#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_GP2) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK) | \
- BIT(MTK_CLK_ETH2PLL))
+#define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
#define MT7628_CLKS_BITMAP (0)
-#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
- BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
- BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
- BIT(MTK_CLK_SGMII_TX_250M) | \
- BIT(MTK_CLK_SGMII_RX_250M) | \
- BIT(MTK_CLK_SGMII_CDR_REF) | \
- BIT(MTK_CLK_SGMII_CDR_FB) | \
- BIT(MTK_CLK_SGMII2_TX_250M) | \
- BIT(MTK_CLK_SGMII2_RX_250M) | \
- BIT(MTK_CLK_SGMII2_CDR_REF) | \
- BIT(MTK_CLK_SGMII2_CDR_FB) | \
- BIT(MTK_CLK_SGMII_CK) | \
- BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
+#define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK))
+#define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
+#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
+ BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
+ BIT_ULL(MTK_CLK_CRYPTO) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
+/* PSE Port Definition */
+enum mtk_pse_port {
+ PSE_ADMA_PORT = 0,
+ PSE_GDM1_PORT,
+ PSE_GDM2_PORT,
+ PSE_PPE0_PORT,
+ PSE_PPE1_PORT,
+ PSE_QDMA_TX_PORT,
+ PSE_QDMA_RX_PORT,
+ PSE_DROP_PORT,
+ PSE_WDMA0_PORT,
+ PSE_WDMA1_PORT,
+ PSE_TDMA_PORT,
+ PSE_NONE_PORT,
+ PSE_PPE2_PORT,
+ PSE_WDMA2_PORT,
+ PSE_EIP197_PORT,
+ PSE_GDM3_PORT,
+ PSE_PORT_MAX
+};
+
+/* GMAC Identifier */
+enum mtk_gmac_id {
+ MTK_GMAC1_ID = 0,
+ MTK_GMAC2_ID,
+ MTK_GMAC3_ID,
+ MTK_GMAC_ID_MAX
+};
+
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -603,8 +907,11 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
- u32 flags;
+ enum mtk_tx_buf_type type;
+ void *data;
+
+ u16 mac_id;
+ u16 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
DEFINE_DMA_UNMAP_ADDR(dma_addr1);
@@ -617,16 +924,18 @@ struct mtk_tx_buf {
* @phys: The physical addr of tx_buf
* @next_free: Pointer to the next free descriptor
* @last_free: Pointer to the last free descriptor
+ * @last_free_ptr: Hardware pointer value of the last free descriptor
* @thresh: The threshold of minimum amount of free descriptors
* @free_count: QDMA uses a linked list. Track how many free descriptors
* are present
*/
struct mtk_tx_ring {
- struct mtk_tx_dma *dma;
+ void *dma;
struct mtk_tx_buf *buf;
dma_addr_t phys;
struct mtk_tx_dma *next_free;
struct mtk_tx_dma *last_free;
+ u32 last_free_ptr;
u16 thresh;
atomic_t free_count;
int dma_size;
@@ -651,7 +960,7 @@ enum mtk_rx_flags {
* @calc_idx: The current head of ring
*/
struct mtk_rx_ring {
- struct mtk_rx_dma *dma;
+ void *dma;
u8 **data;
dma_addr_t phys;
u16 frag_size;
@@ -660,12 +969,16 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
+ MTK_2P5GPHY_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -676,11 +989,17 @@ enum mkt_eth_capabilities {
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
MTK_SOC_MT7628_BIT,
+ MTK_RSTCTRL_PPE1_BIT,
+ MTK_RSTCTRL_PPE2_BIT,
+ MTK_U3_COPHY_V2_BIT,
+ MTK_SRAM_BIT,
+ MTK_36BIT_DMA_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -690,44 +1009,54 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_2P5GPHY_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
};
/* Supported hardware group on SoCs */
-#define MTK_RGMII BIT(MTK_RGMII_BIT)
-#define MTK_TRGMII BIT(MTK_TRGMII_BIT)
-#define MTK_SGMII BIT(MTK_SGMII_BIT)
-#define MTK_ESW BIT(MTK_ESW_BIT)
-#define MTK_GEPHY BIT(MTK_GEPHY_BIT)
-#define MTK_MUX BIT(MTK_MUX_BIT)
-#define MTK_INFRA BIT(MTK_INFRA_BIT)
-#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT)
-#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
-#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
-#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
-#define MTK_QDMA BIT(MTK_QDMA_BIT)
-#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT)
+#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
+#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
+#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
+#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
+#define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
+#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
+#define MTK_SRAM BIT_ULL(MTK_SRAM_BIT)
+#define MTK_36BIT_DMA BIT_ULL(MTK_36BIT_DMA_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
- BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+ BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
- BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
- BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+ BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
- BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
- BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+ BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
/* Supported path present on SoCs */
-#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT)
-#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
-#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT)
-#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
-#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT)
+#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
@@ -735,6 +1064,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
/* MUXes present on SoCs */
@@ -754,6 +1084,10 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
+/* 2: GMAC2 -> 2P5GPHY */
+#define MTK_MUX_GMAC2_TO_2P5GPHY \
+ (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA)
+
/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
@@ -780,8 +1114,79 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
+ MTK_RSTCTRL_PPE1 | MTK_SRAM)
+
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_RSTCTRL_PPE1 | MTK_SRAM)
+
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \
+ MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
+ MTK_RSTCTRL_PPE2 | MTK_SRAM)
+
+struct mtk_tx_dma_desc_info {
+ dma_addr_t addr;
+ u32 size;
+ u16 vlan_tci;
+ u16 qid;
+ u8 gso:1;
+ u8 csum:1;
+ u8 vlan:1;
+ u8 first:1;
+ u8 last:1;
+};
+
+struct mtk_reg_map {
+ u32 tx_irq_mask;
+ u32 tx_irq_status;
+ struct {
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 pcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 irq_status; /* interrupt status */
+ u32 irq_mask; /* interrupt mask */
+ u32 adma_rx_dbg0;
+ u32 int_grp;
+ } pdma;
+ struct {
+ u32 qtx_cfg; /* tx queue configuration */
+ u32 qtx_sch; /* tx queue scheduler configuration */
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 qcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 fc_th; /* flow control */
+ u32 int_grp;
+ u32 hred; /* QDMA HRED2 register */
+ u32 ctx_ptr; /* tx acquire cpu pointer */
+ u32 dtx_ptr; /* tx acquire dma pointer */
+ u32 crx_ptr; /* tx release cpu pointer */
+ u32 drx_ptr; /* tx release dma pointer */
+ u32 fq_head; /* fq head pointer */
+ u32 fq_tail; /* fq tail pointer */
+ u32 fq_count; /* fq free page count */
+ u32 fq_blen; /* fq free page buffer length */
+ u32 tx_sch_rate; /* tx scheduler rate control registers */
+ } qdma;
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe[3];
+ u32 ppe_base;
+ u32 wdma_base[3];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
+};
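struct mtk_reg_map replaces the fixed MTK_PDMA_*/MTK_QDMA_* offsets deleted earlier in this patch, so SoCs with a different register layout can share the same code paths. A partial sketch of a map populated with the legacy offsets removed above (the variable name is illustrative):

    static const struct mtk_reg_map legacy_reg_map = {
            .pdma = {
                    .rx_ptr     = 0x0900,
                    .rx_cnt_cfg = 0x0904,
                    .pcrx_ptr   = 0x0908,
                    .glo_cfg    = 0x0a04,
                    .rst_idx    = 0x0a08,
                    .delay_irq  = 0x0a0c,
                    .irq_status = 0x0a20,
                    .irq_mask   = 0x0a28,
                    .int_grp    = 0x0a50,
            },
            .qdma = {
                    .qtx_cfg    = 0x1800,
                    .qtx_sch    = 0x1804,
                    .glo_cfg    = 0x1a04,
                    .delay_irq  = 0x1a0c,
                    .fq_head    = 0x1b20,
                    .fq_count   = 0x1b28,
            },
            .gdm1_cnt           = 0x2400,
    };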
+
/* struct mtk_soc_data - This is the structure holding all differences
- * among various plaforms
+ * among various platforms
+ * @reg_map SoC register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
* @caps Flags showing the extra capabilities of the SoC
@@ -790,45 +1195,63 @@ enum mkt_eth_capabilities {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
+ * @version SoC version.
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
*/
struct mtk_soc_data {
+ const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
- u32 caps;
- u32 required_clks;
+ u64 caps;
+ u64 required_clks;
bool required_pctl;
+ u8 offload_version;
+ u8 hash_offset;
+ u8 version;
+ u8 ppe_num;
+ u16 foe_entry_size;
netdev_features_t hw_features;
+ bool has_accounting;
+ bool disable_pll_modes;
+ struct {
+ u32 desc_size;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ u32 dma_size;
+ u32 fq_dma_size;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ u32 dma_size;
+ } rx;
};
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS 2
-
-#define MTK_SGMII_PHYSPEED_AN BIT(31)
-#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
-#define MTK_SGMII_PHYSPEED_1000 BIT(0)
-#define MTK_SGMII_PHYSPEED_2500 BIT(1)
-#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
-
-/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
- * characteristics
- * @regmap: The register map pointing at the range used to setup
- * SGMII modes
- * @flags: The enum refers to which mode the sgmii wants to run on
- * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
- */
+#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
-struct mtk_sgmii {
- struct regmap *regmap[MTK_MAX_DEVS];
- u32 flags[MTK_MAX_DEVS];
- u32 ana_rgc3;
-};
+/* currently no SoC has more than 3 macs */
+#define MTK_MAX_DEVS 3
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
+ * @dma_dev: The device pointer used for dma mapping/alloc
* @base: The mapped register i/o base
+ * @sram_pool: Pointer to SRAM pool used for DMA descriptor rings
* @page_lock: Make sure that register operations are atomic
* @tx_irq_lock: Make sure that IRQ register operations are atomic
* @rx_irq_lock: Make sure that IRQ register operations are atomic
+ * @dim_lock: Make sure that Net DIM operations are atomic
* @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
* dummy for NAPI to work
* @netdev: The netdev instances
@@ -839,6 +1262,7 @@ struct mtk_sgmii {
* MII modes
* @infra: The register map pointing at the range used to setup
* SGMII and GePHY path
+ * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
@@ -847,6 +1271,14 @@ struct mtk_sgmii {
* @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
* @tx_napi: The TX NAPI struct
* @rx_napi: The RX NAPI struct
+ * @rx_events: Net DIM RX event counter
+ * @rx_packets: Net DIM RX packet counter
+ * @rx_bytes: Net DIM RX byte counter
+ * @rx_dim: Net DIM RX context
+ * @tx_events: Net DIM TX event counter
+ * @tx_packets: Net DIM TX packet counter
+ * @tx_bytes: Net DIM TX byte counter
+ * @tx_dim: Net DIM TX context
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
@@ -854,24 +1286,26 @@ struct mtk_sgmii {
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
* @state: Initialization and runtime state of the device
- * @soc: Holding specific data among vaious SoCs
+ * @soc: Holding specific data among various SoCs
*/
struct mtk_eth {
struct device *dev;
+ struct device *dma_dev;
void __iomem *base;
+ struct gen_pool *sram_pool;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq[3];
+ int irq[MTK_FE_IRQ_NUM];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
- struct regmap *infra;
- struct mtk_sgmii *sgmii;
+ struct regmap *infra;
+ struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
@@ -880,21 +1314,46 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring_qdma;
struct napi_struct tx_napi;
struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
+ void *scratch_ring;
dma_addr_t phy_scratch_ring;
- void *scratch_head;
+ void *scratch_head[MTK_FQ_DMA_HEAD];
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
+ unsigned int mdc_divider;
struct work_struct pending_work;
unsigned long state;
const struct mtk_soc_data *soc;
- u32 tx_int_mask_reg;
- u32 tx_int_status_reg;
- u32 rx_dma_l4_valid;
+ spinlock_t dim_lock;
+
+ u32 rx_events;
+ u32 rx_packets;
+ u32 rx_bytes;
+ struct dim rx_dim;
+
+ u32 tx_events;
+ u32 tx_packets;
+ u32 tx_bytes;
+ struct dim tx_dim;
+
int ip_align;
+
+ struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
+
+ struct mtk_ppe *ppe[3];
+ struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
+
+ struct {
+ struct delayed_work monitor_work;
+ u32 wdidx;
+ u8 wdma_hang_count;
+ u8 qdma_hang_count;
+ u8 adma_hang_count;
+ } reset;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -908,7 +1367,7 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
- unsigned int mode;
+ u8 ppe_idx;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -917,26 +1376,144 @@ struct mtk_mac {
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
+ unsigned int syscfg0;
+ struct notifier_block device_notifier;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
+{
+ return eth->soc->version == 1;
+}
+
+static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 1;
+}
+
+static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 2;
+}
+
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+
+ return ppe->foe_table + hash * soc->foe_entry_size;
+}
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
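The accessors above keep PPE/flow-offload code independent of the NETSYS version by selecting the right IB1 field layout at runtime. A short usage sketch (ppe, hash and eth are illustrative locals; struct mtk_foe_entry comes from mtk_ppe.h):

    struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, hash);
    u32 ib1 = READ_ONCE(entry->ib1);
    u32 pkt_type = mtk_get_ib1_pkt_type(eth, ib1);
    u32 vlan_layers = mtk_get_ib1_vlan_layer(eth, ib1);
    bool bound_pppoe = !!(ib1 & mtk_get_ib1_ppoe_mask(eth));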
+
+static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ return false;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_5GBASER:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
-
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
- u32 ana_rgc3);
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state);
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id);
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
+
+
#endif /* MTK_ETH_H */