summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@armlinux.org.uk>2016-12-03 22:16:00 +0000
committerRussell King <rmk+kernel@armlinux.org.uk>2019-07-09 12:20:56 +0100
commite266e59e5e3ca08d61c60da42f50a8a04cfff99a (patch)
tree46a1c68a477e0ad4db13787862f560d7fce95318
parent7f9a440a12a1cf16549d91f341922c2741f2091a (diff)
net: marvell: add mvpp2x driver
Add the Marvell PP2x driver as of faf69e0d635f in Marvell's tree, along with fixups for subsequent kernels: - mv_pp2x_get_stats64() now returns void - remove napi_hash_add() - formatting cleanups - replace XFI/SFI/KR phy modes with 10GKR mode - build warning fixes: drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c: In function 'mv_pp2x_ethtool_valid_coalesce': drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c:166:91: warning: integer overflow in expression [-Woverflow] drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c:172:91: warning: integer overflow in expression [-Woverflow] drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c:176:14: warning: integer overflow in expression [-Woverflow] drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c: In function 'mv_pp2x_rx_time_coal_set': drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c:3483:30: warning: integer overflow in expression [-Woverflow] drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c: In function 'mv_pp2x_ethtool_valid_coalesce': drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c:169:3: warning: format '%ld' expects argument of type 'long int', but argument 2 has type '__u32' [-Wformat=] drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c:178:3: warning: format '%ld' expects argument of type 'long int', but argument 2 has type '__u32' [-Wformat=] - 10GBASE-KR only uses one serdes lane, not two - avoid calling pp21 event handler on pp22 hw - fix 2.5G SGMII handling to key off the 2.5G flag not the speed and make GOP queries return the correct speed - correct placement of dev_err() - rename driver so it does not conflict with the mvpp2 driver - fix oops when all ports fail to find PHYs - update for ethtool ksettings - build error fixes: drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h:562:24: error: field 'tx_done_tasklet' has incomplete type drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h:597:24: error: field 'link_change_tasklet' has incomplete type - further build error fixes: In file included 
from drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c:29:0: drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h:253:2: error: unknown type name 'phy_interface_t' phy_interface_t phy_mode; /* RXAUI, SGMII, etc. */ ^ drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c: In function 'mv_pp21_port_mii_set': drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c:3738:7: error: 'PHY_INTERFACE_MODE_SGMII' undeclared (first use in this function) case PHY_INTERFACE_MODE_SGMII: ^ drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c:3738:7: note: each undeclared identifier is reported only once for each function it appears in drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c:3741:7: error: 'PHY_INTERFACE_MODE_RGMII' undeclared (first use in this function) case PHY_INTERFACE_MODE_RGMII: ^ drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c: In function 'mv_pp21_port_loopback_set': drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c:3800:33: error: 'PHY_INTERFACE_MODE_SGMII' undeclared (first use in this function) if (port->mac_data.phy_mode == PHY_INTERFACE_MODE_SGMII) ^ - fix memset of classifier data Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
-rw-r--r--drivers/net/ethernet/marvell/Kconfig9
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/Makefile5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.c3123
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.h472
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw_type.h1983
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h834
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.c98
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.h33
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c1245
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c6204
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.h796
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw_type.h2847
-rw-r--r--drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_main.c5428
14 files changed, 23078 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fb942167ee54..330bb38be27d 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -90,6 +90,15 @@ config MVPP2
This driver supports the network interface units in the
Marvell ARMADA 375, 7K and 8K SoCs.
+config MVPP2X
+ tristate "Marvell Armada 375/70xx/80xx network interface support"
+ depends on ARCH_MVEBU
+ select MVMDIO
+ select MVXMDIO
+ ---help---
+ This driver supports the network interface units in the
+ Marvell ARMADA 375/70xx/80xx SoC series
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on HAS_IOMEM
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 89dea7284d5b..3e0bf9963f55 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o
obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_MVPP2) += mvpp2/
+obj-$(CONFIG_MVPP2X) += mvpp2x/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvpp2x/Makefile b/drivers/net/ethernet/marvell/mvpp2x/Makefile
new file mode 100644
index 000000000000..f9a1e3e152a1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_MVPP2X) += mvpp2x.o
+
+mvpp2x-objs := mv_pp2x_ethtool.o mv_pp2x_hw.o mv_pp2x_main.o mv_pp2x_debug.o
+mvpp2x-objs += mv_gop110_hw.o
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.c b/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.c
new file mode 100644
index 000000000000..e06d124f831f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.c
@@ -0,0 +1,3123 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/inetdevice.h>
+#include <uapi/linux/ppp_defs.h>
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mv_pp2x.h"
+#include "mv_gop110_hw.h"
+
+/* Dump the remapped base address of every GOP-110 sub-unit to the kernel
+ * log. Debug aid only; reads no hardware registers.
+ */
+void mv_gop110_register_bases_dump(struct gop_hw *gop)
+{
+ pr_info(" %-32s: 0x%p\n", "GMAC", gop->gop_110.gmac.base);
+ pr_info(" %-32s: 0x%p\n", "XLG_MAC", gop->gop_110.xlg_mac.base);
+ pr_info(" %-32s: 0x%p\n", "XMIB", gop->gop_110.xmib.base);
+ pr_info(" %-32s: 0x%p\n", "SMI", gop->gop_110.smi_base);
+ pr_info(" %-32s: 0x%p\n", "XSMI", gop->gop_110.xsmi_base);
+ pr_info(" %-32s: 0x%p\n", "MSPG", gop->gop_110.mspg_base);
+ pr_info(" %-32s: 0x%p\n", "XPCS", gop->gop_110.xpcs_base);
+ pr_info(" %-32s: 0x%p\n", "PTP", gop->gop_110.ptp.base);
+ pr_info(" %-32s: 0x%p\n", "RFU1", gop->gop_110.rfu1_base);
+}
+EXPORT_SYMBOL(mv_gop110_register_bases_dump);
+
+/* Dump every GMAC register of the given port to the kernel log via
+ * mv_gop110_gmac_print(). Debug aid only.
+ */
+void mv_gop110_gmac_regs_dump(struct gop_hw *gop, int port)
+{
+ int ind;
+ /* 32 bytes is enough for the longest formatted name below
+  * ("CCFC_PORT_SPEED_TIMER%d" -> 23 chars incl. NUL for ind < 8)
+  */
+ char reg_name[32];
+
+ mv_gop110_gmac_print(gop, "PORT_MAC_CTRL0", port,
+ MV_GMAC_PORT_CTRL0_REG);
+ mv_gop110_gmac_print(gop, "PORT_MAC_CTRL1", port,
+ MV_GMAC_PORT_CTRL1_REG);
+ mv_gop110_gmac_print(gop, "PORT_MAC_CTRL2", port,
+ MV_GMAC_PORT_CTRL2_REG);
+ mv_gop110_gmac_print(gop, "PORT_AUTO_NEG_CFG", port,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+ mv_gop110_gmac_print(gop, "PORT_STATUS0", port,
+ MV_GMAC_PORT_STATUS0_REG);
+ mv_gop110_gmac_print(gop, "PORT_SERIAL_PARAM_CFG", port,
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_REG);
+ mv_gop110_gmac_print(gop, "PORT_FIFO_CFG_0", port,
+ MV_GMAC_PORT_FIFO_CFG_0_REG);
+ mv_gop110_gmac_print(gop, "PORT_FIFO_CFG_1", port,
+ MV_GMAC_PORT_FIFO_CFG_1_REG);
+ mv_gop110_gmac_print(gop, "PORT_SERDES_CFG0", port,
+ MV_GMAC_PORT_SERDES_CFG0_REG);
+ mv_gop110_gmac_print(gop, "PORT_SERDES_CFG1", port,
+ MV_GMAC_PORT_SERDES_CFG1_REG);
+ mv_gop110_gmac_print(gop, "PORT_SERDES_CFG2", port,
+ MV_GMAC_PORT_SERDES_CFG2_REG);
+ mv_gop110_gmac_print(gop, "PORT_SERDES_CFG3", port,
+ MV_GMAC_PORT_SERDES_CFG3_REG);
+ mv_gop110_gmac_print(gop, "PORT_PRBS_STATUS", port,
+ MV_GMAC_PORT_PRBS_STATUS_REG);
+ mv_gop110_gmac_print(gop, "PORT_PRBS_ERR_CNTR", port,
+ MV_GMAC_PORT_PRBS_ERR_CNTR_REG);
+ mv_gop110_gmac_print(gop, "PORT_STATUS1", port,
+ MV_GMAC_PORT_STATUS1_REG);
+ mv_gop110_gmac_print(gop, "PORT_MIB_CNTRS_CTRL", port,
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_REG);
+ mv_gop110_gmac_print(gop, "PORT_MAC_CTRL3", port,
+ MV_GMAC_PORT_CTRL3_REG);
+ mv_gop110_gmac_print(gop, "QSGMII", port,
+ MV_GMAC_QSGMII_REG);
+ mv_gop110_gmac_print(gop, "QSGMII_STATUS", port,
+ MV_GMAC_QSGMII_STATUS_REG);
+ mv_gop110_gmac_print(gop, "QSGMII_PRBS_CNTR", port,
+ MV_GMAC_QSGMII_PRBS_CNTR_REG);
+ /* eight per-speed CCFC timers */
+ for (ind = 0; ind < 8; ind++) {
+ sprintf(reg_name, "CCFC_PORT_SPEED_TIMER%d", ind);
+ mv_gop110_gmac_print(gop, reg_name, port,
+ MV_GMAC_CCFC_PORT_SPEED_TIMER_REG(ind));
+ }
+ /* four flow-control DSA tag registers */
+ for (ind = 0; ind < 4; ind++) {
+ sprintf(reg_name, "FC_DSA_TAG%d", ind);
+ mv_gop110_gmac_print(gop, reg_name, port,
+ MV_GMAC_FC_DSA_TAG_REG(ind));
+ }
+ mv_gop110_gmac_print(gop, "LINK_LEVEL_FLOW_CTRL_WIN_REG_0", port,
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0);
+ mv_gop110_gmac_print(gop, "LINK_LEVEL_FLOW_CTRL_WIN_REG_1", port,
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1);
+ mv_gop110_gmac_print(gop, "PORT_MAC_CTRL4", port,
+ MV_GMAC_PORT_CTRL4_REG);
+ mv_gop110_gmac_print(gop, "PORT_SERIAL_PARAM_1_CFG", port,
+ MV_GMAC_PORT_SERIAL_PARAM_1_CFG_REG);
+ mv_gop110_gmac_print(gop, "LPI_CTRL_0", port,
+ MV_GMAC_LPI_CTRL_0_REG);
+ mv_gop110_gmac_print(gop, "LPI_CTRL_1", port,
+ MV_GMAC_LPI_CTRL_1_REG);
+ mv_gop110_gmac_print(gop, "LPI_CTRL_2", port,
+ MV_GMAC_LPI_CTRL_2_REG);
+ mv_gop110_gmac_print(gop, "LPI_STATUS", port,
+ MV_GMAC_LPI_STATUS_REG);
+ mv_gop110_gmac_print(gop, "LPI_CNTR", port,
+ MV_GMAC_LPI_CNTR_REG);
+ mv_gop110_gmac_print(gop, "PULSE_1_MS_LOW", port,
+ MV_GMAC_PULSE_1_MS_LOW_REG);
+ mv_gop110_gmac_print(gop, "PULSE_1_MS_HIGH", port,
+ MV_GMAC_PULSE_1_MS_HIGH_REG);
+ mv_gop110_gmac_print(gop, "PORT_INT_MASK", port,
+ MV_GMAC_INTERRUPT_MASK_REG);
+ mv_gop110_gmac_print(gop, "INT_SUM_MASK", port,
+ MV_GMAC_INTERRUPT_SUM_MASK_REG);
+}
+EXPORT_SYMBOL(mv_gop110_gmac_regs_dump);
+
+/* Put the given GMAC into reset (reset == RESET) or take it out of reset
+ * (any other enum mv_reset value), by toggling the PORTMACRESET bit of
+ * PORT_CTRL2. Always returns 0.
+ */
+int mv_gop110_gmac_reset(struct gop_hw *gop, int mac_num, enum mv_reset reset)
+{
+ u32 reg_addr;
+ u32 val;
+
+ reg_addr = MV_GMAC_PORT_CTRL2_REG;
+
+ /* read - modify - write */
+ val = mv_gop110_gmac_read(gop, mac_num, reg_addr);
+ if (reset == RESET)
+ val |= MV_GMAC_PORT_CTRL2_PORTMACRESET_MASK;
+ else
+ val &= ~MV_GMAC_PORT_CTRL2_PORTMACRESET_MASK;
+ mv_gop110_gmac_write(gop, mac_num, reg_addr, val);
+
+ return 0;
+}
+
+/* Configure the GMAC for RGMII operation: Tx FIFO threshold, clock/mux
+ * selection in PORT_CTRL4, 125MHz clock bypass, SGMII port type, and
+ * the auto-negotiation register (value 0xb8e8 per the comment below).
+ */
+static void mv_gop110_gmac_rgmii_cfg(struct gop_hw *gop, int mac_num)
+{
+ u32 val, thresh, an;
+
+ /* configure minimal level of the Tx FIFO before the lower
+ * part starts to read a packet
+ */
+ thresh = MV_RGMII_TX_FIFO_MIN_TH;
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD(val, MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG, val);
+
+ /* Disable bypass of sync module */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL4_REG);
+ val |= MV_GMAC_PORT_CTRL4_SYNC_BYPASS_MASK;
+ /* configure DP clock select according to mode */
+ val &= ~MV_GMAC_PORT_CTRL4_DP_CLK_SEL_MASK;
+ val |= MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ val |= MV_GMAC_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL4_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL2_REG);
+ val |= MV_GMAC_PORT_CTRL2_CLK_125_BYPS_EN_MASK;
+ /* FIXME(review): this clears the *_OFFS (bit offset) value, not a
+ * bit mask — MV_GMAC_PORT_CTRL2_DIS_PADING_MASK is presumably what
+ * was intended; verify against the register definitions.
+ */
+ val &= ~MV_GMAC_PORT_CTRL2_DIS_PADING_OFFS;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL2_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ /* configure GIG MAC to SGMII mode */
+ val &= ~MV_GMAC_PORT_CTRL0_PORTTYPE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, val);
+
+ /* configure AN 0xb8e8 */
+ an = MV_GMAC_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+/* Configure the GMAC for QSGMII operation: Tx FIFO threshold, PORT_CTRL4
+ * mux selection (QSGMII bypass inactive), SGMII port type, and
+ * auto-negotiation with in-band (PCS) AN enabled (0xB8EC).
+ */
+static void mv_gop110_gmac_qsgmii_cfg(struct gop_hw *gop, int mac_num)
+{
+ u32 val, thresh, an;
+
+ /* configure minimal level of the Tx FIFO before the lower
+ * part starts to read a packet
+ */
+ thresh = MV_SGMII_TX_FIFO_MIN_TH;
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD(val, MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG, val);
+
+ /* Disable bypass of sync module */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL4_REG);
+ val |= MV_GMAC_PORT_CTRL4_SYNC_BYPASS_MASK;
+ /* configure DP clock select according to mode */
+ val &= ~MV_GMAC_PORT_CTRL4_DP_CLK_SEL_MASK;
+ val &= ~MV_GMAC_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+ /* configure QSGMII bypass according to mode */
+ val &= ~MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL4_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL2_REG);
+ /* FIXME(review): clears the *_OFFS (bit offset) value rather than a
+ * _MASK — likely meant MV_GMAC_PORT_CTRL2_DIS_PADING_MASK; verify.
+ */
+ val &= ~MV_GMAC_PORT_CTRL2_DIS_PADING_OFFS;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL2_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ /* configure GIG MAC to SGMII mode */
+ val &= ~MV_GMAC_PORT_CTRL0_PORTTYPE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, val);
+
+ /* configure AN 0xB8EC */
+ an = MV_GMAC_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+/* Configure the GMAC for 1G SGMII operation: Tx FIFO threshold, PORT_CTRL4
+ * mux selection (QSGMII bypass active), SGMII port type, and in-band
+ * (PCS) auto-negotiation.
+ */
+static void mv_gop110_gmac_sgmii_cfg(struct gop_hw *gop, int mac_num)
+{
+ u32 val, thresh, an;
+
+ /* configure minimal level of the Tx FIFO before the lower
+ * part starts to read a packet
+ */
+ thresh = MV_SGMII_TX_FIFO_MIN_TH;
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD(val, MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG, val);
+
+ /* Disable bypass of sync module */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL4_REG);
+ val |= MV_GMAC_PORT_CTRL4_SYNC_BYPASS_MASK;
+ /* configure DP clock select according to mode */
+ val &= ~MV_GMAC_PORT_CTRL4_DP_CLK_SEL_MASK;
+ /* configure QSGMII bypass according to mode */
+ val |= MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL4_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL2_REG);
+ /* FIXME(review): ORs in the *_OFFS (bit offset) value rather than
+ * a _MASK — likely meant MV_GMAC_PORT_CTRL2_DIS_PADING_MASK, which
+ * would set a different bit; verify against register definitions.
+ */
+ val |= MV_GMAC_PORT_CTRL2_DIS_PADING_OFFS;
+ val &= ~MV_GMAC_PORT_CTRL2_FC_MODE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL2_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ /* configure GIG MAC to SGMII mode */
+ val &= ~MV_GMAC_PORT_CTRL0_PORTTYPE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, val);
+
+ /* configure AN */
+ an = MV_GMAC_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+/* Configure the GMAC for 2.5G SGMII: higher Tx FIFO threshold, DP clock
+ * select set for the 2.5x serdes clock, 1000Base-X port type, and fixed
+ * (forced) speed/duplex — no AN speed/duplex resolution (0x9260).
+ */
+static void mv_gop110_gmac_sgmii2_5_cfg(struct gop_hw *gop, int mac_num)
+{
+ u32 val, thresh, an;
+
+ /* configure minimal level of the Tx FIFO before the lower
+ * part starts to read a packet
+ */
+ thresh = MV_SGMII2_5_TX_FIFO_MIN_TH;
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD(val, MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_FIFO_CFG_1_REG, val);
+
+ /* Disable bypass of sync module */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL4_REG);
+ val |= MV_GMAC_PORT_CTRL4_SYNC_BYPASS_MASK;
+ /* configure DP clock select according to mode */
+ val |= MV_GMAC_PORT_CTRL4_DP_CLK_SEL_MASK;
+ /* configure QSGMII bypass according to mode */
+ val |= MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL4_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL2_REG);
+ /* FIXME(review): ORs in the *_OFFS (bit offset) value rather than
+ * a _MASK — likely meant MV_GMAC_PORT_CTRL2_DIS_PADING_MASK; verify.
+ */
+ val |= MV_GMAC_PORT_CTRL2_DIS_PADING_OFFS;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL2_REG, val);
+
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ /* configure GIG MAC to 1000Base-X mode connected to a
+ * fiber transceiver
+ */
+ val |= MV_GMAC_PORT_CTRL0_PORTTYPE_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, val);
+
+ /* configure AN 0x9260: force GMII speed + full duplex, advertise
+ * pause; note no EN_PCS_AN / EN_AN_SPEED bits here, unlike 1G SGMII
+ */
+ an = MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+/* Set the internal mux's to the required MAC in the GOP.
+ * Dispatches to the per-mode config helper based on mac->phy_mode
+ * (SGMII with optional 2.5G flag, RGMII, QSGMII), then applies common
+ * settings: 802.3x FC forwarding, jumbo frame size limit, periodic XON
+ * disable, and link-change interrupt unmasking.
+ * Returns 0 on success, -1 for an unsupported phy mode.
+ * NOTE(review): the -1 return is inconsistent with the -EINVAL/-EPERM
+ * style used elsewhere in this file.
+ */
+int mv_gop110_gmac_mode_cfg(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ u32 reg_addr;
+ u32 val;
+
+ int mac_num = mac->gop_index;
+
+ /* Set TX FIFO thresholds */
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ /* the 2.5G flag, not the speed, selects the 2.5G variant */
+ if (mac->flags & MV_EMAC_F_SGMII2_5)
+ mv_gop110_gmac_sgmii2_5_cfg(gop, mac_num);
+ else
+ mv_gop110_gmac_sgmii_cfg(gop, mac_num);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ mv_gop110_gmac_rgmii_cfg(gop, mac_num);
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_qsgmii_cfg(gop, mac_num);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Control packets are forwarded into the ingress pipe */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_SERIAL_PARAM_CFG_REG);
+ U32_SET_FIELD(val, MV_GMAC_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_MASK,
+ (0x1 << MV_GMAC_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS));
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_SERIAL_PARAM_CFG_REG, val);
+
+ /* Jumbo frame support - 0x1400*2= 0x2800 bytes */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ U32_SET_FIELD(val, MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_MASK,
+ (0x1400 << MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_OFFS));
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, val);
+
+ /* PeriodicXonEn disable */
+ reg_addr = MV_GMAC_PORT_CTRL1_REG;
+ val = mv_gop110_gmac_read(gop, mac_num, reg_addr);
+ val &= ~MV_GMAC_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK;
+ mv_gop110_gmac_write(gop, mac_num, reg_addr, val);
+
+ /* mask all ports interrupts */
+ mv_gop110_gmac_port_link_event_mask(gop, mac_num);
+
+ /* unmask link change interrupt */
+ val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_INTERRUPT_MASK_REG);
+ val |= MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK;
+ val |= 1; /* unmask summary bit */
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_INTERRUPT_MASK_REG, val);
+
+ return 0;
+}
+
+/* Configure MAC loopback.
+ * MV_DISABLE_LB clears, MV_TX_2_RX_LB sets the GMII loopback bit in
+ * PORT_CTRL1. MV_RX_2_TX_LB is not supported by this MAC and returns -1
+ * (as does any unknown type) without touching the hardware.
+ */
+int mv_gop110_gmac_loopback_cfg(struct gop_hw *gop, int mac_num,
+ enum mv_lb_type type)
+{
+ u32 reg_addr;
+ u32 val;
+
+ reg_addr = MV_GMAC_PORT_CTRL1_REG;
+ val = mv_gop110_gmac_read(gop, mac_num, reg_addr);
+ switch (type) {
+ case MV_DISABLE_LB:
+ val &= ~MV_GMAC_PORT_CTRL1_GMII_LOOPBACK_MASK;
+ break;
+ case MV_TX_2_RX_LB:
+ val |= MV_GMAC_PORT_CTRL1_GMII_LOOPBACK_MASK;
+ break;
+ case MV_RX_2_TX_LB:
+ default:
+ return -1;
+ }
+ mv_gop110_gmac_write(gop, mac_num, reg_addr, val);
+
+ return 0;
+}
+
+/* Get MAC link status: true when bit 0 of PORT_STATUS0 is set.
+ * NOTE(review): uses a bare "1" — presumably the same bit as
+ * MV_GMAC_PORT_STATUS0_LINKUP_MASK used by mv_gop110_gmac_link_status();
+ * the named mask would be clearer. Confirm bit 0 is LINKUP.
+ */
+bool mv_gop110_gmac_link_status_get(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_addr;
+ u32 val;
+
+ reg_addr = MV_GMAC_PORT_STATUS0_REG;
+
+ val = mv_gop110_gmac_read(gop, mac_num, reg_addr);
+ return (val & 1) ? true : false;
+}
+
+/* Enable the port and its MIB counters by setting PORTEN and COUNT_EN
+ * in PORT_CTRL0.
+ */
+void mv_gop110_gmac_port_enable(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ reg_val |= MV_GMAC_PORT_CTRL0_PORTEN_MASK;
+ reg_val |= MV_GMAC_PORT_CTRL0_COUNT_EN_MASK;
+
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, reg_val);
+}
+
+/* Disable the port: mask its link interrupts first, then clear PORTEN.
+ * MIB counters (COUNT_EN) are deliberately left unchanged.
+ */
+void mv_gop110_gmac_port_disable(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ /* mask all ports interrupts */
+ mv_gop110_gmac_port_link_event_mask(gop, mac_num);
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ reg_val &= ~MV_GMAC_PORT_CTRL0_PORTEN_MASK;
+
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, reg_val);
+}
+
+/* Enable (enable != 0) or disable periodic flow-control XON emission
+ * via the EN_PERIODIC_FC_XON bit of PORT_CTRL1.
+ */
+void mv_gop110_gmac_port_periodic_xon_set(struct gop_hw *gop,
+ int mac_num, int enable)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL1_REG);
+
+ if (enable)
+ reg_val |= MV_GMAC_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK;
+ else
+ reg_val &= ~MV_GMAC_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK;
+
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL1_REG, reg_val);
+}
+
+/* Fill *pstatus with the port's current link state as reported by
+ * PORT_STATUS0 (speed, link up/down, duplex, tx/rx flow control) and
+ * PORT_AUTO_NEG_CFG (flow-control autoneg mode). Always returns 0.
+ */
+int mv_gop110_gmac_link_status(struct gop_hw *gop, int mac_num,
+ struct mv_port_link_status *pstatus)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_STATUS0_REG);
+
+ /* speed bits: GMII wins over MII; neither set means 10M.
+  * NOTE(review): 2.5G SGMII also reports GMIISPEED, so it appears
+  * here as MV_PORT_SPEED_1000 — callers must apply the 2.5G flag.
+  */
+ if (reg_val & MV_GMAC_PORT_STATUS0_GMIISPEED_MASK)
+ pstatus->speed = MV_PORT_SPEED_1000;
+ else if (reg_val & MV_GMAC_PORT_STATUS0_MIISPEED_MASK)
+ pstatus->speed = MV_PORT_SPEED_100;
+ else
+ pstatus->speed = MV_PORT_SPEED_10;
+
+ if (reg_val & MV_GMAC_PORT_STATUS0_LINKUP_MASK)
+ pstatus->linkup = 1 /*TRUE*/;
+ else
+ pstatus->linkup = 0 /*FALSE*/;
+
+ if (reg_val & MV_GMAC_PORT_STATUS0_FULLDX_MASK)
+ pstatus->duplex = MV_PORT_DUPLEX_FULL;
+ else
+ pstatus->duplex = MV_PORT_DUPLEX_HALF;
+
+ /* ACTIVE = pause currently asserted; ENABLE = negotiated/configured */
+ if (reg_val & MV_GMAC_PORT_STATUS0_PORTTXPAUSE_MASK)
+ pstatus->tx_fc = MV_PORT_FC_ACTIVE;
+ else if (reg_val & MV_GMAC_PORT_STATUS0_TXFCEN_MASK)
+ pstatus->tx_fc = MV_PORT_FC_ENABLE;
+ else
+ pstatus->tx_fc = MV_PORT_FC_DISABLE;
+
+ if (reg_val & MV_GMAC_PORT_STATUS0_PORTRXPAUSE_MASK)
+ pstatus->rx_fc = MV_PORT_FC_ACTIVE;
+ else if (reg_val & MV_GMAC_PORT_STATUS0_RXFCEN_MASK)
+ pstatus->rx_fc = MV_PORT_FC_ENABLE;
+ else
+ pstatus->rx_fc = MV_PORT_FC_DISABLE;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK) {
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK)
+ pstatus->autoneg_fc = MV_PORT_FC_AN_ASYM;
+ else
+ pstatus->autoneg_fc = MV_PORT_FC_AN_SYM;
+ } else {
+ pstatus->autoneg_fc = MV_PORT_FC_AN_NO;
+ }
+
+ return 0;
+}
+
+/* Change maximum receive size of the port.
+ * The FRAMESIZELIMIT field is programmed in units of 2 bytes, after
+ * subtracting the Marvell header size; hence the /2. Always returns 0.
+ */
+int mv_gop110_gmac_max_rx_size_set(struct gop_hw *gop,
+ int mac_num, int max_rx_size)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL0_REG);
+ reg_val &= ~MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_MASK;
+ reg_val |= (((max_rx_size - MVPP2_MH_SIZE) / 2) <<
+ MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_OFFS);
+ mv_gop110_gmac_write(gop, mac_num, MV_GMAC_PORT_CTRL0_REG, reg_val);
+
+ return 0;
+}
+
+/* Sets "Force Link Pass" and "Do Not Force Link Fail" bits.
+* This function should only be called when the port is disabled.
+* INPUT:
+* int port - port number
+* bool force_link_pass - Force Link Pass
+* bool force_link_fail - Force Link Failure
+* 0, 0 - normal state: detect link via PHY and connector
+* 1, 1 - prohibited state.
+* Returns 0 on success, -EINVAL if both forces are requested at once.
+*/
+int mv_gop110_gmac_force_link_mode_set(struct gop_hw *gop, int mac_num,
+ bool force_link_up,
+ bool force_link_down)
+{
+ u32 reg_val;
+
+ /* Can't force link pass and link fail at the same time */
+ if ((force_link_up) && (force_link_down))
+ return -EINVAL;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ if (force_link_up)
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK;
+ else
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK;
+
+ if (force_link_down)
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK;
+ else
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK;
+
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG, reg_val);
+
+ return 0;
+}
+
+/* Get "Force Link Pass" and "Do Not Force Link Fail" bits.
+* INPUT:
+* int port - port number
+* OUTPUT:
+* bool *force_link_up - Force Link Pass
+* bool *force_link_down - Force Link Failure
+* Returns 0 on success, -EINVAL if either output pointer is NULL.
+*/
+int mv_gop110_gmac_force_link_mode_get(struct gop_hw *gop, int mac_num,
+ bool *force_link_up,
+ bool *force_link_down)
+{
+ u32 reg_val;
+
+ /* Both output pointers must be valid (the original "can't force
+  * link pass and link fail" comment here was a copy-paste from the
+  * setter; this check only guards NULL pointers)
+  */
+ if ((!force_link_up) || (!force_link_down))
+ return -EINVAL;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK)
+ *force_link_up = true;
+ else
+ *force_link_up = false;
+
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK)
+ *force_link_down = true;
+ else
+ *force_link_down = false;
+
+ return 0;
+}
+
+/* Sets port speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+* Sets port duplex to Auto Negotiation / Full / Half Duplex.
+* Returns 0 on success; -EINVAL for 1000/half (invalid combination) or
+* an unrecognized speed/duplex value (nothing written in those cases).
+*/
+int mv_gop110_gmac_speed_duplex_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ enum mv_port_duplex duplex)
+{
+ u32 reg_val;
+
+ /* Check validity */
+ if ((speed == MV_PORT_SPEED_1000) && (duplex == MV_PORT_DUPLEX_HALF))
+ return -EINVAL;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ switch (speed) {
+ case MV_PORT_SPEED_AN:
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ /* the other bits don't matter in this case */
+ break;
+ /* 2500 deliberately shares the 1000 setting: both use the GMII
+  * speed bit — presumably the serdes clock selects 2.5x; confirm
+  * against mv_gop110_gmac_sgmii2_5_cfg()
+  */
+ case MV_PORT_SPEED_2500:
+ case MV_PORT_SPEED_1000:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+ /* the 100/10 bit doesn't matter in this case */
+ break;
+ case MV_PORT_SPEED_100:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK;
+ break;
+ case MV_PORT_SPEED_10:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK;
+ break;
+ default:
+ pr_info("GMAC: Unexpected Speed value %d\n", speed);
+ return -EINVAL;
+ }
+
+ switch (duplex) {
+ case MV_PORT_DUPLEX_AN:
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ /* the other bits don't matter in this case */
+ break;
+ case MV_PORT_DUPLEX_HALF:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK;
+ break;
+ case MV_PORT_DUPLEX_FULL:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK;
+ break;
+ default:
+ pr_err("GMAC: Unexpected Duplex value %d\n", duplex);
+ return -EINVAL;
+ }
+
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG, reg_val);
+ return 0;
+}
+
+/* Gets port speed and duplex from PORT_AUTO_NEG_CFG.
+ * Inverse of mv_gop110_gmac_speed_duplex_set(); note 2500 cannot be
+ * distinguished from 1000 here (both use the GMII speed bit).
+ * Returns 0 on success, -EINVAL if either output pointer is NULL.
+ */
+int mv_gop110_gmac_speed_duplex_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed *speed,
+ enum mv_port_duplex *duplex)
+{
+ u32 reg_val;
+
+ /* Check validity */
+ if (!speed || !duplex)
+ return -EINVAL;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK)
+ *speed = MV_PORT_SPEED_AN;
+ else if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK)
+ *speed = MV_PORT_SPEED_1000;
+ else if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK)
+ *speed = MV_PORT_SPEED_100;
+ else
+ *speed = MV_PORT_SPEED_10;
+
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK)
+ *duplex = MV_PORT_DUPLEX_AN;
+ else if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK)
+ *duplex = MV_PORT_DUPLEX_FULL;
+ else
+ *duplex = MV_PORT_DUPLEX_HALF;
+
+ return 0;
+}
+
+/* Configure the port's Flow Control properties.
+ * AN_* modes program the pause advertisement bits of PORT_AUTO_NEG_CFG;
+ * the non-AN ENABLE/DISABLE modes disable FC autoneg and drive the
+ * RX/TX FC-enable bits of PORT_CTRL4 directly. The *_TX_DISABLE and
+ * *_RX_DISABLE cases only clear the respective PORT_CTRL4 bit and
+ * leave the autoneg register unchanged. Both registers are written
+ * back on success; returns -EINVAL for an unknown mode (no write).
+ */
+int mv_gop110_gmac_fc_set(struct gop_hw *gop, int mac_num, enum mv_port_fc fc)
+{
+ u32 reg_val;
+ u32 fc_en;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+ fc_en = mv_gop110_gmac_read(gop, mac_num, MV_GMAC_PORT_CTRL4_REG);
+
+ switch (fc) {
+ case MV_PORT_FC_AN_NO:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK;
+ break;
+
+ case MV_PORT_FC_AN_SYM:
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK;
+ break;
+
+ case MV_PORT_FC_AN_ASYM:
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK;
+ break;
+
+ case MV_PORT_FC_DISABLE:
+ /* NOTE(review): unlike AN_NO, ADV_PAUSE is not cleared here */
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK;
+ fc_en &= ~MV_GMAC_PORT_CTRL4_FC_EN_RX_MASK;
+ fc_en &= ~MV_GMAC_PORT_CTRL4_FC_EN_TX_MASK;
+ break;
+
+ case MV_PORT_FC_TX_DISABLE:
+ fc_en &= ~MV_GMAC_PORT_CTRL4_FC_EN_TX_MASK;
+ break;
+
+ case MV_PORT_FC_RX_DISABLE:
+ fc_en &= ~MV_GMAC_PORT_CTRL4_FC_EN_RX_MASK;
+ break;
+
+ case MV_PORT_FC_ENABLE:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ fc_en |= MV_GMAC_PORT_CTRL4_FC_EN_RX_MASK;
+ fc_en |= MV_GMAC_PORT_CTRL4_FC_EN_TX_MASK;
+ break;
+
+ case MV_PORT_FC_TX_ENABLE:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ fc_en |= MV_GMAC_PORT_CTRL4_FC_EN_TX_MASK;
+ break;
+
+ case MV_PORT_FC_RX_ENABLE:
+ reg_val &= ~MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK;
+ fc_en |= MV_GMAC_PORT_CTRL4_FC_EN_RX_MASK;
+ break;
+
+ default:
+ pr_err("GMAC: Unexpected FlowControl value %d\n", fc);
+ return -EINVAL;
+ }
+
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_PORT_CTRL4_REG, fc_en);
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG, reg_val);
+ return 0;
+}
+
+/* Get Flow Control configuration of the port.
+ * With FC autoneg enabled, reports AN_ASYM/AN_SYM/AN_NO from the pause
+ * advertisement bits; otherwise reports ENABLE only when BOTH RX and TX
+ * FC are enabled in PORT_CTRL4 — a TX-only or RX-only configuration is
+ * reported as DISABLE (this getter has no partial-enable states).
+ */
+void mv_gop110_gmac_fc_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc *fc)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK) {
+ /* Auto negotiation is enabled */
+ if (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK) {
+ if (reg_val &
+ MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK)
+ *fc = MV_PORT_FC_AN_ASYM;
+ else
+ *fc = MV_PORT_FC_AN_SYM;
+ } else {
+ *fc = MV_PORT_FC_AN_NO;
+ }
+ } else {
+ /* Auto negotiation is disabled */
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_CTRL4_REG);
+ if ((reg_val & MV_GMAC_PORT_CTRL4_FC_EN_RX_MASK) &&
+ (reg_val & MV_GMAC_PORT_CTRL4_FC_EN_TX_MASK))
+ *fc = MV_PORT_FC_ENABLE;
+ else
+ *fc = MV_PORT_FC_DISABLE;
+ }
+}
+
+/* Force the GMAC link up at a fixed @speed with full duplex and flow
+ * control enabled, or - when @force_link_up is 0 - return the port to
+ * auto-negotiated speed, duplex and symmetric flow control.
+ *
+ * Returns 0 on success, -EPERM if any underlying GMAC setter fails.
+ */
+int mv_gop110_gmac_port_link_speed_fc(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ int force_link_up)
+{
+ if (force_link_up) {
+ if (mv_gop110_gmac_speed_duplex_set(gop, mac_num, speed,
+ MV_PORT_DUPLEX_FULL)) {
+ pr_err("mv_gop110_gmac_speed_duplex_set failed\n");
+ return -EPERM;
+ }
+ if (mv_gop110_gmac_fc_set(gop, mac_num, MV_PORT_FC_ENABLE)) {
+ pr_err("mv_gop110_gmac_fc_set failed\n");
+ return -EPERM;
+ }
+ if (mv_gop110_gmac_force_link_mode_set(gop, mac_num, 1, 0)) {
+ pr_err("mv_gop110_gmac_force_link_mode_set failed\n");
+ return -EPERM;
+ }
+ } else {
+ /* Drop any forced-link state first, then re-enable AN */
+ if (mv_gop110_gmac_force_link_mode_set(gop, mac_num, 0, 0)) {
+ pr_err("mv_gop110_gmac_force_link_mode_set failed\n");
+ return -EPERM;
+ }
+ if (mv_gop110_gmac_speed_duplex_set(gop, mac_num,
+ MV_PORT_SPEED_AN,
+ MV_PORT_DUPLEX_AN)) {
+ pr_err("mv_gop110_gmac_speed_duplex_set failed\n");
+ return -EPERM;
+ }
+ if (mv_gop110_gmac_fc_set(gop, mac_num, MV_PORT_FC_AN_SYM)) {
+ pr_err("mv_gop110_gmac_fc_set failed\n");
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+/* Stop the GMAC from raising link-change interrupts by clearing the
+ * link-change bit in the summary interrupt mask register.
+ */
+void mv_gop110_gmac_port_link_event_mask(struct gop_hw *gop, int mac_num)
+{
+ u32 mask;
+
+ mask = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_INTERRUPT_SUM_MASK_REG);
+ mask &= ~MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_INTERRUPT_SUM_MASK_REG, mask);
+}
+
+/* Enable delivery of link-change interrupts from the GMAC by setting
+ * the link-change bit and the summary-enable bit in the summary
+ * interrupt mask register.
+ */
+void mv_gop110_gmac_port_link_event_unmask(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_INTERRUPT_SUM_MASK_REG);
+ reg_val |= MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
+ reg_val |= 1; /* unmask summary bit (bit 0 of the mask register) */
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_INTERRUPT_SUM_MASK_REG, reg_val);
+}
+
+/* Acknowledge pending GMAC link events.  The interrupt cause register
+ * is presumably clear-on-read (no write-back is performed here - TODO
+ * confirm against the GOP spec), so reading it is sufficient.  The
+ * value is deliberately discarded; storing it in a local produced a
+ * "variable set but not used" compiler warning.
+ */
+void mv_gop110_gmac_port_link_event_clear(struct gop_hw *gop, int mac_num)
+{
+ mv_gop110_gmac_read(gop, mac_num, MV_GMAC_INTERRUPT_CAUSE_REG);
+}
+
+/* Restart in-band (PCS) auto-negotiation on the given GMAC.
+ * Always returns 0.
+ */
+int mv_gop110_gmac_port_autoneg_restart(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+ /* enable AN and restart it */
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK;
+ reg_val |= MV_GMAC_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_MASK;
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG, reg_val);
+ return 0;
+}
+
+/*************************************************************************
+* mv_port_init
+*
+* DESCRIPTION:
+* Init physical port. Configures the port mode and all its elements
+* accordingly.
+* Does not verify that the selected mode/port number is valid at the
+* core level.
+*
+* INPUTS:
+* port_num - physical port number
+* port_mode - port standard metric
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* 0 - on success
+* -1 - on error (bad port number or unsupported PHY mode)
+*
+*************************************************************************/
+int mv_gop110_port_init(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int num_of_act_lanes;
+ int mac_num = mac->gop_index;
+
+ if (mac_num >= MVCPN110_GOP_MAC_NUM) {
+ pr_err("%s: illegal port number %d", __func__, mac_num);
+ return -1;
+ }
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* force the link down while reconfiguring the port */
+ mv_gop110_force_link_mode_set(gop, mac, false, true);
+ mv_gop110_gmac_reset(gop, mac_num, RESET);
+ /* configure PCS (disabled for RGMII) */
+ mv_gop110_gpcs_mode_cfg(gop, mac_num, false);
+
+ /* configure MAC */
+ mv_gop110_gmac_mode_cfg(gop, mac);
+
+ /* select proper Mac mode */
+ mv_gop110_xlg_2_gig_mac_cfg(gop, mac_num);
+
+ /* pcs unreset */
+ mv_gop110_gpcs_reset(gop, mac_num, UNRESET);
+ /* mac unreset */
+ mv_gop110_gmac_reset(gop, mac_num, UNRESET);
+ mv_gop110_force_link_mode_set(gop, mac, false, false);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_force_link_mode_set(gop, mac, false, true);
+ mv_gop110_gmac_reset(gop, mac_num, RESET);
+ /* configure PCS (enabled for SGMII/QSGMII) */
+ mv_gop110_gpcs_mode_cfg(gop, mac_num, true);
+
+ /* configure MAC */
+ mv_gop110_gmac_mode_cfg(gop, mac);
+ /* select proper Mac mode */
+ mv_gop110_xlg_2_gig_mac_cfg(gop, mac_num);
+
+ /* pcs unreset */
+ mv_gop110_gpcs_reset(gop, mac_num, UNRESET);
+ /* mac unreset */
+ mv_gop110_gmac_reset(gop, mac_num, UNRESET);
+ mv_gop110_force_link_mode_set(gop, mac, false, false);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ /* XAUI: four serdes lanes, always on XLG MAC 0 */
+ num_of_act_lanes = 4;
+ mac_num = 0;
+ /* configure PCS */
+ mv_gop110_xpcs_mode(gop, num_of_act_lanes);
+ /* configure MAC */
+ mv_gop110_xlg_mac_mode_cfg(gop, mac_num, num_of_act_lanes);
+
+ /* pcs unreset */
+ mv_gop110_xpcs_reset(gop, UNRESET);
+ /* mac unreset */
+ mv_gop110_xlg_mac_reset(gop, mac_num, UNRESET);
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ /* RXAUI: two serdes lanes, always on XLG MAC 0 */
+ num_of_act_lanes = 2;
+ mac_num = 0;
+ /* configure PCS */
+ mv_gop110_xpcs_mode(gop, num_of_act_lanes);
+ /* configure MAC */
+ mv_gop110_xlg_mac_mode_cfg(gop, mac_num, num_of_act_lanes);
+
+ /* pcs unreset */
+ mv_gop110_xpcs_reset(gop, UNRESET);
+
+ /* mac unreset */
+ mv_gop110_xlg_mac_reset(gop, mac_num, UNRESET);
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ /* 10GBASE-KR: single serdes lane, XLG MAC 0, needs MPCS too */
+ num_of_act_lanes = 1;
+ mac_num = 0;
+ /* configure PCS */
+ mv_gop110_xpcs_mode(gop, num_of_act_lanes);
+ mv_gop110_mpcs_mode(gop);
+ /* configure MAC */
+ mv_gop110_xlg_mac_mode_cfg(gop, mac_num, num_of_act_lanes);
+
+ /* pcs unreset */
+ mv_gop110_xpcs_reset(gop, UNRESET);
+
+ /* mac unreset */
+ mv_gop110_xlg_mac_reset(gop, mac_num, UNRESET);
+ break;
+ default:
+ pr_err("%s: Requested port mode (%d) not supported",
+ __func__, mac->phy_mode);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**************************************************************************
+* mv_port_reset
+*
+* DESCRIPTION:
+* Clears the port mode and releases all its resources
+* according to the selected mode.
+* Does not verify that the selected mode/port number is valid at the core
+* level and actual terminated mode.
+*
+* INPUTS:
+* port_num - physical port number
+* port_mode - port standard metric
+* action - Power down or reset
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* 0 - on success
+* -1 - on error (unsupported PHY mode)
+*
+**************************************************************************/
+int mv_gop110_port_reset(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int mac_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* pcs reset */
+ mv_gop110_gpcs_reset(gop, mac_num, RESET);
+ /* mac reset */
+ mv_gop110_gmac_reset(gop, mac_num, RESET);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ /* pcs reset */
+ mv_gop110_xpcs_reset(gop, RESET);
+ /* mac reset */
+ mv_gop110_xlg_mac_reset(gop, mac_num, RESET);
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ /* pcs reset */
+ mv_gop110_xpcs_reset(gop, RESET);
+ /* mac reset */
+ mv_gop110_xlg_mac_reset(gop, mac_num, RESET);
+ break;
+ /* Stefan: need to check KR case */
+ case PHY_INTERFACE_MODE_10GKR:
+ /* pcs reset */
+ mv_gop110_xpcs_reset(gop, RESET);
+ /* mac reset */
+ mv_gop110_xlg_mac_reset(gop, mac_num, RESET);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+
+ /* TBD:serdes reset or power down if needed*/
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------*/
+void mv_gop110_port_enable(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_enable(gop, port_num);
+ mv_gop110_force_link_mode_set(gop, mac, false, false);
+ mv_gop110_gmac_reset(gop, port_num, UNRESET);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_mpcs_clock_reset(gop, UNRESET);
+ mv_gop110_xlg_mac_reset(gop, port_num, UNRESET);
+ mv_gop110_xlg_mac_port_enable(gop, port_num);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return;
+ }
+}
+
+/* Disable the port's MAC: mirror image of mv_gop110_port_enable().
+ * Gigabit ports are force-linked-down and put back into reset; 10G
+ * ports have the XLG MAC disabled and the MAC/MPCS clock reset.
+ * Unknown modes are logged and ignored.
+ */
+void mv_gop110_port_disable(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_disable(gop, port_num);
+ mv_gop110_force_link_mode_set(gop, mac, false, true);
+ mv_gop110_gmac_reset(gop, port_num, RESET);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_mac_port_disable(gop, port_num);
+ mv_gop110_xlg_mac_reset(gop, port_num, RESET);
+ mv_gop110_mpcs_clock_reset(gop, RESET);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return;
+ }
+}
+
+/* Enable/disable periodic XON flow-control frame emission on the port.
+ * NOTE(review): unlike the sibling enable/disable/link-status helpers,
+ * PHY_INTERFACE_MODE_RGMII is absent from this switch and falls through
+ * to the error path - confirm whether that is intentional.
+ */
+void mv_gop110_port_periodic_xon_set(struct gop_hw *gop,
+ struct mv_mac_data *mac,
+ int enable)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_periodic_xon_set(gop, port_num, enable);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_mac_port_periodic_xon_set(gop, port_num, enable);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return;
+ }
+}
+
+/* Report whether the MAC-level link is up for the given port, reading
+ * the GMAC or XLG MAC status register according to the PHY mode.
+ * Returns false (and logs an error) for unknown PHY modes.
+ * The unreachable "break" statements after "return" were removed.
+ */
+bool mv_gop110_port_is_link_up(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return mv_gop110_gmac_link_status_get(gop, port_num);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ return mv_gop110_xlg_mac_link_status_get(gop, port_num);
+ default:
+ pr_err("%s: Wrong port mode gop_port(%d), phy_mode(%d)",
+ __func__, port_num, mac->phy_mode);
+ return false;
+ }
+}
+
+/* Fill @pstatus with the port's current link/speed/duplex/flow-control
+ * state.  For an SGMII port flagged MV_EMAC_F_SGMII2_5 the GMAC reports
+ * 1G while the serdes actually runs at 2.5G, so the speed is overridden.
+ * Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_port_link_status(struct gop_hw *gop, struct mv_mac_data *mac,
+ struct mv_port_link_status *pstatus)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_link_status(gop, port_num, pstatus);
+ if (pstatus->speed == MV_PORT_SPEED_1000 &&
+ mac->phy_mode == PHY_INTERFACE_MODE_SGMII &&
+ mac->flags & MV_EMAC_F_SGMII2_5)
+ pstatus->speed = MV_PORT_SPEED_2500;
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_mac_link_status(gop, port_num, pstatus);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Report whether both duplex and speed auto-negotiation are enabled on
+ * the GMAC.  Bug fix: the original tested
+ * MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS (a bit *offset*) as if it
+ * were a bit mask, so the wrong bits were examined; use the _MASK
+ * define, matching mv_gop110_gmac_set_autoneg() which sets these bits.
+ */
+bool mv_gop110_port_autoneg_status(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_gmac_read(gop, mac->gop_index,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ return (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK) &&
+ (reg_val & MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK);
+}
+
+/* Return the PortType field of Port Control 0 for the given GMAC. */
+int mv_gop110_check_port_type(struct gop_hw *gop, int port_num)
+{
+ u32 ctrl0;
+
+ ctrl0 = mv_gop110_gmac_read(gop, port_num, MV_GMAC_PORT_CTRL0_REG);
+ return (ctrl0 & MV_GMAC_PORT_CTRL0_PORTTYPE_MASK) >>
+ MV_GMAC_PORT_CTRL0_PORTTYPE_OFFS;
+}
+
+/* Enable or disable both speed and duplex auto-negotiation on the
+ * port's GMAC via the auto-negotiation configuration register.
+ */
+void mv_gop110_gmac_set_autoneg(struct gop_hw *gop, struct mv_mac_data *mac, bool auto_neg)
+{
+ int mac_num = mac->gop_index;
+ u32 an_cfg;
+
+ an_cfg = mv_gop110_gmac_read(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG);
+
+ if (auto_neg)
+ an_cfg |= MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ else
+ an_cfg &= ~(MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK);
+
+ mv_gop110_gmac_write(gop, mac_num,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG, an_cfg);
+}
+
+/* Dump the port's MAC registers (GMAC or XLG MAC, per PHY mode) to the
+ * kernel log.  Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_port_regs(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ pr_info("\n[gop GMAC #%d registers]\n", port_num);
+ mv_gop110_gmac_regs_dump(gop, port_num);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ pr_info("\n[gop XLG MAC #%d registers]\n", port_num);
+ mv_gop110_xlg_mac_regs_dump(gop, port_num);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Mask (disable) link-change interrupts for the port, dispatching to the
+ * GMAC or XLG helper per PHY mode.  Returns 0 on success, -1 on unknown
+ * PHY mode.
+ */
+int mv_gop110_port_events_mask(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_link_event_mask(gop, port_num);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_port_link_event_mask(gop, port_num);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Unmask (enable) link-change interrupts for the port.
+ * Returns 0 on success, -1 on unknown PHY mode.
+ * NOTE(review): the gige path unmasks external event bit 2 on XLG port
+ * 0 while the 10G path unmasks bit 1 on its own port - these indexes
+ * come from the interrupt wiring; confirm against the GOP spec.
+ */
+int mv_gop110_port_events_unmask(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_link_event_unmask(gop, port_num);
+ /* gige interrupt cause connected to CPU via XLG
+ * external interrupt cause
+ */
+ mv_gop110_xlg_port_external_event_unmask(gop, 0, 2);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_port_external_event_unmask(gop, port_num, 1);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Acknowledge (clear) any pending link events on the port.
+ * Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_port_events_clear(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_link_event_clear(gop, port_num);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_port_link_event_clear(gop, port_num);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Pretty-print the configuration and current link status (mode, link,
+ * speed, duplex) of @port_num to the kernel log.
+ * Returns 0 on success, -1 when the PHY mode or a value read back from
+ * the hardware is not a recognised enum member.
+ */
+int mv_gop110_status_show(struct gop_hw *gop, struct mv_pp2x *pp2, int port_num)
+{
+ struct mv_port_link_status port_status;
+ struct mv_pp2x_port *pp_port;
+ struct mv_mac_data *mac;
+
+ pp_port = mv_pp2x_port_struct_get(pp2, port_num);
+ mac = &pp_port->mac_data;
+
+ mv_gop110_port_link_status(gop, mac, &port_status);
+
+ pr_info("-------------- Port %d configuration ----------------",
+ port_num);
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ pr_info("Port mode : RGMII");
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ pr_info("Port mode : SGMII");
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ pr_info("Port mode : QSGMII");
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ pr_info("Port mode : XAUI");
+ break;
+ case PHY_INTERFACE_MODE_RXAUI:
+ pr_info("Port mode : RXAUI");
+ break;
+ case PHY_INTERFACE_MODE_10GKR:
+ pr_info("Port mode : 10GBASE-KR");
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+
+ pr_info("\nLink status : %s",
+ (port_status.linkup) ? "link up" : "link down");
+ pr_info("\n");
+
+ switch (port_status.speed) {
+ case MV_PORT_SPEED_AN:
+ pr_info("Port speed : AutoNeg");
+ break;
+ case MV_PORT_SPEED_10:
+ pr_info("Port speed : 10M");
+ break;
+ case MV_PORT_SPEED_100:
+ pr_info("Port speed : 100M");
+ break;
+ case MV_PORT_SPEED_1000:
+ pr_info("Port speed : 1G");
+ break;
+ case MV_PORT_SPEED_2500:
+ pr_info("Port speed : 2.5G");
+ break;
+ case MV_PORT_SPEED_10000:
+ pr_info("Port speed : 10G");
+ break;
+ default:
+ pr_err("%s: Wrong port speed (%d)\n", __func__,
+ port_status.speed);
+ return -1;
+ }
+ pr_info("\n");
+ switch (port_status.duplex) {
+ case MV_PORT_DUPLEX_AN:
+ pr_info("Port duplex : AutoNeg");
+ break;
+ case MV_PORT_DUPLEX_HALF:
+ pr_info("Port duplex : half");
+ break;
+ case MV_PORT_DUPLEX_FULL:
+ pr_info("Port duplex : full");
+ break;
+ default:
+ pr_err("%s: Wrong port duplex (%d)", __func__,
+ port_status.duplex);
+ return -1;
+ }
+ pr_info("\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_gop110_status_show);
+
+/* get port speed and duplex.
+ * Dispatches to the GMAC or XLG reader per PHY mode; for SGMII ports
+ * flagged MV_EMAC_F_SGMII2_5 a GMAC reading of 1G is reported as 2.5G
+ * (the serdes runs at 2.5x rate).  Returns 0 on success, -1 on unknown
+ * PHY mode.
+ */
+int mv_gop110_speed_duplex_get(struct gop_hw *gop, struct mv_mac_data *mac,
+ enum mv_port_speed *speed,
+ enum mv_port_duplex *duplex)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_speed_duplex_get(gop, port_num, speed,
+ duplex);
+ if (*speed == MV_PORT_SPEED_1000 &&
+ mac->phy_mode == PHY_INTERFACE_MODE_SGMII &&
+ mac->flags & MV_EMAC_F_SGMII2_5)
+ *speed = MV_PORT_SPEED_2500;
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_mac_speed_duplex_get(gop, port_num, speed,
+ duplex);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* set port speed and duplex.
+ * Dispatches to the GMAC or XLG setter per PHY mode.
+ * Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_speed_duplex_set(struct gop_hw *gop, struct mv_mac_data *mac,
+ enum mv_port_speed speed,
+ enum mv_port_duplex duplex)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_speed_duplex_set(gop, port_num, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ mv_gop110_xlg_mac_speed_duplex_set(gop, port_num, speed,
+ duplex);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Restart auto-negotiation on the port.  RGMII has no in-band AN so the
+ * request is silently accepted; the 10G modes do not support AN restart
+ * and return an error.  Returns 0 on success, -1 otherwise.
+ * Fixed error-message typo: "on supported" -> "not supported".
+ */
+int mv_gop110_autoneg_restart(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_gmac_port_autoneg_restart(gop, port_num);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ pr_err("%s: not supported for port mode (%d)", __func__,
+ mac->phy_mode);
+ return -1;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* Forced-link configuration for gigabit ports: disable AN by forcing a
+ * fixed speed (2.5G when the SGMII 2.5G flag is set, else 1G) at full
+ * duplex, then force the link up.  10G modes need no action here.
+ * Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_fl_cfg(struct gop_hw *gop, struct mv_mac_data *mac)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* disable AN */
+ if (mac->flags & MV_EMAC_F_SGMII2_5)
+ mv_gop110_speed_duplex_set(gop, mac,
+ MV_PORT_SPEED_2500,
+ MV_PORT_DUPLEX_FULL);
+ else
+ mv_gop110_speed_duplex_set(gop, mac,
+ MV_PORT_SPEED_1000,
+ MV_PORT_DUPLEX_FULL);
+ /* force link */
+ mv_gop110_gmac_force_link_mode_set(gop, port_num, true, false);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ return 0;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* set port ForceLinkUp and ForceLinkDown.
+ * Only meaningful for gigabit (GMAC) ports; the 10G modes accept the
+ * call as a no-op.  Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_force_link_mode_set(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool force_link_up,
+ bool force_link_down)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* force link */
+ mv_gop110_gmac_force_link_mode_set(gop, port_num,
+ force_link_up,
+ force_link_down);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ return 0;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/* get port ForceLinkUp and ForceLinkDown.
+ * Only gigabit (GMAC) ports have force-link controls; 10G modes return
+ * 0 without touching the output parameters.  Returns the GMAC getter's
+ * result, 0 for 10G modes, -1 on unknown PHY mode.
+ * Removed the unreachable "break" after "return" and the dead trailing
+ * "return 0" (every switch path already returns).
+ */
+int mv_gop110_force_link_mode_get(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool *force_link_up,
+ bool *force_link_down)
+{
+ int port_num = mac->gop_index;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return mv_gop110_gmac_force_link_mode_get(gop, port_num,
+ force_link_up,
+ force_link_down);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ return 0;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+}
+
+/* set port internal loopback.
+ * For gigabit ports, @lb true selects TX-to-RX loopback and false
+ * disables loopback.  10G modes accept the call as a no-op.
+ * Returns 0 on success, -1 on unknown PHY mode.
+ */
+int mv_gop110_loopback_set(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool lb)
+{
+ int port_num = mac->gop_index;
+ enum mv_lb_type type;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* set loopback */
+ if (lb)
+ type = MV_TX_2_RX_LB;
+ else
+ type = MV_DISABLE_LB;
+
+ mv_gop110_gmac_loopback_cfg(gop, port_num, type);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ return 0;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return -1;
+ }
+ return 0;
+}
+
+/**************************************************************************
+* mv_gop110_gpcs_mode_cfg
+*
+* DESCRIPTION:
+* Configure whether the port works with the Gig PCS or not.
+*
+* INPUTS:
+* pcs_num - physical PCS number
+* en - true to enable PCS
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* 0 - always (the function cannot fail)
+*
+**************************************************************************/
+int mv_gop110_gpcs_mode_cfg(struct gop_hw *gop, int pcs_num, bool en)
+{
+ u32 val;
+
+ val = mv_gop110_gmac_read(gop, pcs_num, MV_GMAC_PORT_CTRL2_REG);
+
+ if (en)
+ val |= MV_GMAC_PORT_CTRL2_PCS_EN_MASK;
+ else
+ val &= ~MV_GMAC_PORT_CTRL2_PCS_EN_MASK;
+
+ /* enable / disable PCS on this port */
+ mv_gop110_gmac_write(gop, pcs_num, MV_GMAC_PORT_CTRL2_REG, val);
+
+ return 0;
+}
+
+/**************************************************************************
+* mv_gop110_gpcs_reset
+*
+* DESCRIPTION:
+* Set the selected PCS number to reset or exit from reset.
+* The SGMII-mode bit of Port Control 2 acts as the PCS un-reset
+* control here: cleared for RESET, set for UNRESET.
+*
+* INPUTS:
+* pcs_num - physical PCS number
+* action - reset / unreset
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* 0 - always (the function cannot fail)
+*
+*************************************************************************/
+int mv_gop110_gpcs_reset(struct gop_hw *gop, int pcs_num, enum mv_reset act)
+{
+ u32 reg_data;
+
+ reg_data = mv_gop110_gmac_read(gop, pcs_num, MV_GMAC_PORT_CTRL2_REG);
+ if (act == RESET)
+ U32_SET_FIELD(reg_data, MV_GMAC_PORT_CTRL2_SGMII_MODE_MASK, 0);
+ else
+ U32_SET_FIELD(reg_data, MV_GMAC_PORT_CTRL2_SGMII_MODE_MASK,
+ 1 << MV_GMAC_PORT_CTRL2_SGMII_MODE_OFFS);
+
+ mv_gop110_gmac_write(gop, pcs_num, MV_GMAC_PORT_CTRL2_REG, reg_data);
+ return 0;
+}
+
+/**************************************************************************
+* mv_gop110_smi_init - ensure MDC polarity is not inverted.
+* Always returns 0.
+**************************************************************************/
+int mv_gop110_smi_init(struct gop_hw *gop)
+{
+ u32 misc_cfg;
+
+ /* not invert MDC */
+ misc_cfg = mv_gop110_smi_read(gop, MV_SMI_MISC_CFG_REG);
+ misc_cfg &= ~MV_SMI_MISC_CFG_INVERT_MDC_MASK;
+ mv_gop110_smi_write(gop, MV_SMI_MISC_CFG_REG, misc_cfg);
+
+ return 0;
+}
+
+/**************************************************************************
+* mv_gop_phy_addr_cfg
+* Program the SMI PHY address for the given port.  Always returns 0.
+**************************************************************************/
+int mv_gop110_smi_phy_addr_cfg(struct gop_hw *gop, int port, int addr)
+{
+ mv_gop110_smi_write(gop, MV_SMI_PHY_ADDRESS_REG(port), addr);
+
+ return 0;
+}
+
+/* print value of unit registers.
+ * Fixes: the MACRO_CTRL register was dumped twice; the duplicate was
+ * removed.  sprintf() was replaced with the bounded snprintf().
+ */
+void mv_gop110_xlg_mac_regs_dump(struct gop_hw *gop, int port)
+{
+ int timer;
+ char reg_name[16];
+
+ mv_gop110_xlg_mac_print(gop, "PORT_MAC_CTRL0", port,
+ MV_XLG_PORT_MAC_CTRL0_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_MAC_CTRL1", port,
+ MV_XLG_PORT_MAC_CTRL1_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_MAC_CTRL2", port,
+ MV_XLG_PORT_MAC_CTRL2_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_STATUS", port,
+ MV_XLG_MAC_PORT_STATUS_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_FIFOS_THRS_CFG", port,
+ MV_XLG_PORT_FIFOS_THRS_CFG_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_MAC_CTRL3", port,
+ MV_XLG_PORT_MAC_CTRL3_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_PER_PRIO_FLOW_CTRL_STATUS", port,
+ MV_XLG_PORT_PER_PRIO_FLOW_CTRL_STATUS_REG);
+ mv_gop110_xlg_mac_print(gop, "DEBUG_BUS_STATUS", port,
+ MV_XLG_DEBUG_BUS_STATUS_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_METAL_FIX", port,
+ MV_XLG_PORT_METAL_FIX_REG);
+ mv_gop110_xlg_mac_print(gop, "XG_MIB_CNTRS_CTRL", port,
+ MV_XLG_MIB_CNTRS_CTRL_REG);
+ for (timer = 0; timer < 8; timer++) {
+ snprintf(reg_name, sizeof(reg_name), "CNCCFC_TIMER%d", timer);
+ mv_gop110_xlg_mac_print(gop, reg_name, port,
+ MV_XLG_CNCCFC_TIMERI_REG(timer));
+ }
+ mv_gop110_xlg_mac_print(gop, "PPFC_CTRL", port,
+ MV_XLG_MAC_PPFC_CTRL_REG);
+ mv_gop110_xlg_mac_print(gop, "FC_DSA_TAG_0", port,
+ MV_XLG_MAC_FC_DSA_TAG_0_REG);
+ mv_gop110_xlg_mac_print(gop, "FC_DSA_TAG_1", port,
+ MV_XLG_MAC_FC_DSA_TAG_1_REG);
+ mv_gop110_xlg_mac_print(gop, "FC_DSA_TAG_2", port,
+ MV_XLG_MAC_FC_DSA_TAG_2_REG);
+ mv_gop110_xlg_mac_print(gop, "FC_DSA_TAG_3", port,
+ MV_XLG_MAC_FC_DSA_TAG_3_REG);
+ mv_gop110_xlg_mac_print(gop, "DIC_BUDGET_COMPENSATION", port,
+ MV_XLG_MAC_DIC_BUDGET_COMPENSATION_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_MAC_CTRL4", port,
+ MV_XLG_PORT_MAC_CTRL4_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_MAC_CTRL5", port,
+ MV_XLG_PORT_MAC_CTRL5_REG);
+ mv_gop110_xlg_mac_print(gop, "EXT_CTRL", port,
+ MV_XLG_MAC_EXT_CTRL_REG);
+ mv_gop110_xlg_mac_print(gop, "MACRO_CTRL", port,
+ MV_XLG_MAC_MACRO_CTRL_REG);
+ mv_gop110_xlg_mac_print(gop, "PORT_INT_MASK", port,
+ MV_XLG_INTERRUPT_MASK_REG);
+ mv_gop110_xlg_mac_print(gop, "EXTERNAL_INT_MASK", port,
+ MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
+}
+EXPORT_SYMBOL(mv_gop110_xlg_mac_regs_dump);
+
+/* Set the MAC to reset or exit from reset.
+ * The MacResetN bit is active-low: cleared to hold the MAC in reset,
+ * set to release it.  Always returns 0.
+ */
+int mv_gop110_xlg_mac_reset(struct gop_hw *gop, int mac_num,
+ enum mv_reset reset)
+{
+ u32 reg_addr;
+ u32 val;
+
+ reg_addr = MV_XLG_PORT_MAC_CTRL0_REG;
+
+ /* read - modify - write */
+ val = mv_gop110_xlg_mac_read(gop, mac_num, reg_addr);
+ if (reset == RESET)
+ val &= ~MV_XLG_MAC_CTRL0_MACRESETN_MASK;
+ else
+ val |= MV_XLG_MAC_CTRL0_MACRESETN_MASK;
+ mv_gop110_xlg_mac_write(gop, mac_num, reg_addr, val);
+
+ return 0;
+}
+
+/* Set the internal mux's to the required MAC in the GOP.
+ * Puts the XLG MAC into 10G mode, enables RX flow control and PFC/802.3x
+ * frame forwarding, programs the jumbo frame size limit, and leaves only
+ * the link-change interrupt unmasked.  @num_of_act_lanes is currently
+ * unused here.  Always returns 0.
+ */
+int mv_gop110_xlg_mac_mode_cfg(struct gop_hw *gop, int mac_num,
+ int num_of_act_lanes)
+{
+ u32 reg_addr;
+ u32 val;
+
+ /* configure 10G MAC mode */
+ reg_addr = MV_XLG_PORT_MAC_CTRL0_REG;
+ val = mv_gop110_xlg_mac_read(gop, mac_num, reg_addr);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL0_RXFCEN_MASK,
+ (1 << MV_XLG_MAC_CTRL0_RXFCEN_OFFS));
+ mv_gop110_xlg_mac_write(gop, mac_num, reg_addr, val);
+
+ /* MacModeSelect = 1 selects the 10G (XLG) data path */
+ reg_addr = MV_XLG_PORT_MAC_CTRL3_REG;
+ val = mv_gop110_xlg_mac_read(gop, mac_num, reg_addr);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL3_MACMODESELECT_MASK,
+ (1 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS));
+ mv_gop110_xlg_mac_write(gop, mac_num, reg_addr, val);
+
+ reg_addr = MV_XLG_PORT_MAC_CTRL4_REG;
+
+ /* read - modify - write */
+ val = mv_gop110_xlg_mac_read(gop, mac_num, reg_addr);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_MASK, 0 <<
+ MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_MASK, 1 <<
+ MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK, 1 <<
+ MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_MASK, 0 <<
+ MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_OFFS);
+ mv_gop110_xlg_mac_write(gop, mac_num, reg_addr, val);
+
+ /* Jumbo frame support - 0x1400*2= 0x2800 bytes */
+ val = mv_gop110_xlg_mac_read(gop, mac_num, MV_XLG_PORT_MAC_CTRL1_REG);
+ U32_SET_FIELD(val, MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK, 0x1400);
+ mv_gop110_xlg_mac_write(gop, mac_num, MV_XLG_PORT_MAC_CTRL1_REG, val);
+
+ /* mask all port interrupts */
+ mv_gop110_xlg_port_link_event_mask(gop, mac_num);
+
+ /* unmask link change interrupt */
+ val = mv_gop110_xlg_mac_read(gop, mac_num, MV_XLG_INTERRUPT_MASK_REG);
+ val |= MV_XLG_INTERRUPT_LINK_CHANGE_MASK;
+ val |= 1; /* unmask summary bit */
+ mv_gop110_xlg_mac_write(gop, mac_num, MV_XLG_INTERRUPT_MASK_REG, val);
+
+ return 0;
+}
+
+/* Configure MAC loopback.
+ * MV_DISABLE_LB clears both loopback bits, MV_RX_2_TX_LB enables only
+ * the XGMII loopback, MV_TX_2_RX_LB enables both the MAC and XGMII
+ * loopbacks.  Returns 0 on success, -1 on unknown loopback type.
+ */
+int mv_gop110_xlg_mac_loopback_cfg(struct gop_hw *gop, int mac_num,
+ enum mv_lb_type type)
+{
+ u32 reg_addr;
+ u32 val;
+
+ reg_addr = MV_XLG_PORT_MAC_CTRL1_REG;
+ val = mv_gop110_xlg_mac_read(gop, mac_num, reg_addr);
+ switch (type) {
+ case MV_DISABLE_LB:
+ val &= ~MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK;
+ val &= ~MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK;
+ break;
+ case MV_RX_2_TX_LB:
+ val &= ~MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK;
+ val |= MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK;
+ break;
+ case MV_TX_2_RX_LB:
+ val |= MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK;
+ val |= MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK;
+ break;
+ default:
+ return -1;
+ }
+ mv_gop110_xlg_mac_write(gop, mac_num, reg_addr, val);
+ return 0;
+}
+
+/* Get MAC link status.
+ * Test the named link-status bit instead of a bare "& 1" magic number,
+ * matching mv_gop110_xlg_mac_link_status() which reads the same
+ * register with MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK.
+ */
+bool mv_gop110_xlg_mac_link_status_get(struct gop_hw *gop, int mac_num)
+{
+ u32 status;
+
+ status = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_MAC_PORT_STATUS_REG);
+ return !!(status & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK);
+}
+
+/* Enable the XLG port and re-enable MIB counter updates by setting
+ * PortEn and clearing the MIB-counter-disable bit in MAC Control 0.
+ */
+void mv_gop110_xlg_mac_port_enable(struct gop_hw *gop, int mac_num)
+{
+ u32 ctrl0;
+
+ ctrl0 = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG);
+ ctrl0 = (ctrl0 | MV_XLG_MAC_CTRL0_PORTEN_MASK) &
+ ~MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK;
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG, ctrl0);
+}
+
+/* Disable port.
+ * Masks the port's link interrupts first so no events fire while the
+ * port-enable bit is being cleared.
+ */
+void mv_gop110_xlg_mac_port_disable(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ /* mask all port interrupts */
+ mv_gop110_xlg_port_link_event_mask(gop, mac_num);
+
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG);
+ reg_val &= ~MV_XLG_MAC_CTRL0_PORTEN_MASK;
+
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG, reg_val);
+}
+
+/* Enable or disable periodic XON frame emission on the XLG MAC by
+ * toggling the PeriodicXonEn bit in Port MAC Control 0.
+ */
+void mv_gop110_xlg_mac_port_periodic_xon_set(struct gop_hw *gop,
+ int mac_num,
+ int enable)
+{
+ u32 ctrl0;
+
+ ctrl0 = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG);
+ if (enable)
+ ctrl0 |= MV_XLG_MAC_CTRL0_PERIODICXONEN_MASK;
+ else
+ ctrl0 &= ~MV_XLG_MAC_CTRL0_PERIODICXONEN_MASK;
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG, ctrl0);
+}
+
+/* Fill @pstatus with the XLG MAC's speed/duplex (derived from the
+ * MacModeSelect field: 0 = GMAC pass-through at 1G, 1 = XLG at 10G),
+ * link state, and RX/TX flow-control state (ACTIVE when a pause is
+ * currently asserted, else ENABLE/DISABLE per the CTRL0 enables).
+ * Returns 0 on success, -1 on an unexpected MAC mode.
+ */
+int mv_gop110_xlg_mac_link_status(struct gop_hw *gop,
+ int mac_num,
+ struct mv_port_link_status *pstatus)
+{
+ u32 reg_val;
+ u32 mac_mode;
+ u32 fc_en;
+
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL3_REG);
+ mac_mode = (reg_val & MV_XLG_MAC_CTRL3_MACMODESELECT_MASK) >>
+ MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS;
+
+ /* speed and duplex */
+ switch (mac_mode) {
+ case 0:
+ pstatus->speed = MV_PORT_SPEED_1000;
+ pstatus->duplex = MV_PORT_DUPLEX_AN;
+ break;
+ case 1:
+ pstatus->speed = MV_PORT_SPEED_10000;
+ pstatus->duplex = MV_PORT_DUPLEX_FULL;
+ break;
+ default:
+ return -1;
+ }
+
+ /* link status */
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_MAC_PORT_STATUS_REG);
+ if (reg_val & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK)
+ pstatus->linkup = 1 /*TRUE*/;
+ else
+ pstatus->linkup = 0 /*FALSE*/;
+
+ /* flow control status */
+ fc_en = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL0_REG);
+ if (reg_val & MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_MASK)
+ pstatus->tx_fc = MV_PORT_FC_ACTIVE;
+ else if (fc_en & MV_XLG_MAC_CTRL0_TXFCEN_MASK)
+ pstatus->tx_fc = MV_PORT_FC_ENABLE;
+ else
+ pstatus->tx_fc = MV_PORT_FC_DISABLE;
+
+ if (reg_val & MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_MASK)
+ pstatus->rx_fc = MV_PORT_FC_ACTIVE;
+ else if (fc_en & MV_XLG_MAC_CTRL0_RXFCEN_MASK)
+ pstatus->rx_fc = MV_PORT_FC_ENABLE;
+ else
+ pstatus->rx_fc = MV_PORT_FC_DISABLE;
+
+ return 0;
+}
+
+/* Change maximum receive size of the port.
+ * NOTE(review): the frame-size-limit field is written as
+ * (max_rx_size - MVPP2_MH_SIZE) / 2, i.e. the register appears to count
+ * in 2-byte units after stripping the Marvell header — confirm against
+ * the XLG CTRL1 register spec.
+ */
+int mv_gop110_xlg_mac_max_rx_size_set(struct gop_hw *gop, int mac_num,
+ int max_rx_size)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL1_REG);
+ reg_val &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
+ reg_val |= (((max_rx_size - MVPP2_MH_SIZE) / 2) <<
+ MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS);
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL1_REG, reg_val);
+
+ return 0;
+}
+
+/* Control the "Force Link Pass" / "Force Link Down" bits of CTRL0.
+ * This function should only be called when the port is disabled.
+ * force_link_up   - force the link to be reported up
+ * force_link_down - force the link to be reported down
+ * (false, false)  - normal state: detect link via PHY and connector
+ * (true, true)    - prohibited combination, rejected with -EINVAL
+ */
+int mv_gop110_xlg_mac_force_link_mode_set(struct gop_hw *gop, int mac_num,
+ bool force_link_up,
+ bool force_link_down)
+{
+ u32 val;
+
+ /* forcing the link up and down at once is contradictory */
+ if (force_link_up && force_link_down)
+ return -EINVAL;
+
+ val = mv_gop110_xlg_mac_read(gop, mac_num, MV_XLG_PORT_MAC_CTRL0_REG);
+
+ val &= ~(MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK |
+ MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK);
+ if (force_link_up)
+ val |= MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
+ if (force_link_down)
+ val |= MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
+
+ mv_gop110_xlg_mac_write(gop, mac_num, MV_XLG_PORT_MAC_CTRL0_REG, val);
+
+ return 0;
+}
+
+/* Sets port speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+* Sets port duplex to Auto Negotiation / Full / Half Duplex.
+* Not supported on the XLG MAC (speed follows the MAC mode instead);
+* always returns -1.
+*/
+int mv_gop110_xlg_mac_speed_duplex_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ enum mv_port_duplex duplex)
+{
+ /* not supported */
+ return -1;
+}
+
+/* Gets port speed and duplex.
+ * Not supported on the XLG MAC; always returns -1 and leaves the
+ * output parameters untouched.
+ */
+int mv_gop110_xlg_mac_speed_duplex_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed *speed,
+ enum mv_port_duplex *duplex)
+{
+ /* not supported */
+ return -1;
+}
+
+/* Configure the port's flow-control properties in CTRL0.
+ * Auto-negotiated FC modes are not supported here and return -EINVAL.
+ */
+int mv_gop110_xlg_mac_fc_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc fc)
+{
+ u32 both = MV_XLG_MAC_CTRL0_RXFCEN_MASK | MV_XLG_MAC_CTRL0_TXFCEN_MASK;
+ u32 val;
+
+ val = mv_gop110_xlg_mac_read(gop, mac_num, MV_XLG_PORT_MAC_CTRL0_REG);
+
+ switch (fc) {
+ case MV_PORT_FC_DISABLE:
+ val &= ~both;
+ break;
+
+ case MV_PORT_FC_ENABLE:
+ val |= both;
+ break;
+
+ case MV_PORT_FC_TX_DISABLE:
+ val &= ~MV_XLG_MAC_CTRL0_TXFCEN_MASK;
+ break;
+
+ case MV_PORT_FC_RX_DISABLE:
+ val &= ~MV_XLG_MAC_CTRL0_RXFCEN_MASK;
+ break;
+
+ case MV_PORT_FC_TX_ENABLE:
+ val |= MV_XLG_MAC_CTRL0_TXFCEN_MASK;
+ break;
+
+ case MV_PORT_FC_RX_ENABLE:
+ val |= MV_XLG_MAC_CTRL0_RXFCEN_MASK;
+ break;
+
+ case MV_PORT_FC_AN_NO:
+ case MV_PORT_FC_AN_SYM:
+ case MV_PORT_FC_AN_ASYM:
+ default:
+ pr_err("XLG MAC: Unexpected FlowControl value %d\n", fc);
+ return -EINVAL;
+ }
+
+ mv_gop110_xlg_mac_write(gop, mac_num, MV_XLG_PORT_MAC_CTRL0_REG, val);
+ return 0;
+}
+
+/* Report the port's flow-control configuration: ENABLE only when both
+ * the RX and TX enables are set in CTRL0, DISABLE otherwise.
+ */
+void mv_gop110_xlg_mac_fc_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc *fc)
+{
+ u32 both = MV_XLG_MAC_CTRL0_RXFCEN_MASK | MV_XLG_MAC_CTRL0_TXFCEN_MASK;
+ u32 val;
+
+ /* No auto negotiation for flow control */
+ val = mv_gop110_xlg_mac_read(gop, mac_num, MV_XLG_PORT_MAC_CTRL0_REG);
+
+ *fc = ((val & both) == both) ? MV_PORT_FC_ENABLE : MV_PORT_FC_DISABLE;
+}
+
+/* Configure flow control and link forcing together. With
+ * force_link_up: enable FC and force the link up; otherwise restore
+ * normal link detection and request symmetric AN flow control.
+ * NOTE(review): MV_PORT_FC_AN_SYM hits the -EINVAL default case in
+ * mv_gop110_xlg_mac_fc_set, so the non-forced path always fails with
+ * -EPERM — confirm whether that is intended.
+ */
+int mv_gop110_xlg_mac_port_link_speed_fc(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ int force_link_up)
+{
+ int err;
+
+ if (force_link_up) {
+ err = mv_gop110_xlg_mac_fc_set(gop, mac_num, MV_PORT_FC_ENABLE);
+ if (err) {
+ pr_err("mv_gop110_xlg_mac_fc_set failed\n");
+ return -EPERM;
+ }
+ err = mv_gop110_xlg_mac_force_link_mode_set(gop, mac_num, 1, 0);
+ if (err) {
+ pr_err(
+ "mv_gop110_xlg_mac_force_link_mode_set failed\n");
+ return -EPERM;
+ }
+ } else {
+ err = mv_gop110_xlg_mac_force_link_mode_set(gop, mac_num, 0, 0);
+ if (err) {
+ pr_err(
+ "mv_gop110_xlg_mac_force_link_mode_set failed\n");
+ return -EPERM;
+ }
+ err = mv_gop110_xlg_mac_fc_set(gop, mac_num, MV_PORT_FC_AN_SYM);
+ if (err) {
+ pr_err("mv_gop110_xlg_mac_fc_set failed\n");
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+/* Mask the XLG port link event by clearing bit 1 of the external
+ * interrupt mask register; the summary bit (bit 0) is left untouched.
+ * NOTE(review): bit 1 is assumed to be the link-status-change source —
+ * confirm against the XLG external interrupt register layout.
+ */
+void mv_gop110_xlg_port_link_event_mask(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
+ reg_val &= ~(1 << 1);
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg_val);
+}
+
+/* Unmask one external interrupt source (@bit_2_open) along with the
+ * summary bit (bit 0) in the XLG external interrupt mask register.
+ */
+void mv_gop110_xlg_port_external_event_unmask(struct gop_hw *gop, int mac_num,
+ int bit_2_open)
+{
+ u32 reg_val;
+
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
+ reg_val |= (1 << bit_2_open);
+ reg_val |= 1; /* unmask summary bit */
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg_val);
+}
+
+/* Acknowledge pending XLG link events: the interrupt cause register is
+ * cleared by the read itself, so the value is deliberately discarded.
+ * (The previous unused local triggered -Wunused-but-set-variable.)
+ */
+void mv_gop110_xlg_port_link_event_clear(struct gop_hw *gop, int mac_num)
+{
+ mv_gop110_xlg_mac_read(gop, mac_num, MV_XLG_INTERRUPT_CAUSE_REG);
+}
+
+/* Switch MAC0 (the only MAC backed by both XLG0 and GMAC0) into the 1G
+ * GMAC mode by writing 0 into the CTRL3 MAC-mode-select field.
+ */
+void mv_gop110_xlg_2_gig_mac_cfg(struct gop_hw *gop, int mac_num)
+{
+ u32 reg_val;
+
+ /* relevant only for MAC0 (XLG0 and GMAC0) */
+ if (mac_num > 0)
+ return;
+
+ /* configure 1Gig MAC mode */
+ reg_val = mv_gop110_xlg_mac_read(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL3_REG);
+ U32_SET_FIELD(reg_val, MV_XLG_MAC_CTRL3_MACMODESELECT_MASK,
+ (0 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS));
+ mv_gop110_xlg_mac_write(gop, mac_num,
+ MV_XLG_PORT_MAC_CTRL3_REG, reg_val);
+}
+
+/* Dump the XPCS global (non per-lane) registers to the kernel log */
+void mv_gop110_xpcs_gl_regs_dump(struct gop_hw *gop)
+{
+ pr_info("\nXPCS Global registers]\n");
+ mv_gop110_xpcs_global_print(gop, "GLOBAL_CFG_0",
+ MV_XPCS_GLOBAL_CFG_0_REG);
+ mv_gop110_xpcs_global_print(gop, "GLOBAL_CFG_1",
+ MV_XPCS_GLOBAL_CFG_1_REG);
+ mv_gop110_xpcs_global_print(gop, "GLOBAL_FIFO_THR_CFG",
+ MV_XPCS_GLOBAL_FIFO_THR_CFG_REG);
+ mv_gop110_xpcs_global_print(gop, "GLOBAL_MAX_IDLE_CNTR",
+ MV_XPCS_GLOBAL_MAX_IDLE_CNTR_REG);
+ mv_gop110_xpcs_global_print(gop, "GLOBAL_STATUS",
+ MV_XPCS_GLOBAL_STATUS_REG);
+ mv_gop110_xpcs_global_print(gop, "GLOBAL_DESKEW_ERR_CNTR",
+ MV_XPCS_GLOBAL_DESKEW_ERR_CNTR_REG);
+ mv_gop110_xpcs_global_print(gop, "TX_PCKTS_CNTR_LSB",
+ MV_XPCS_TX_PCKTS_CNTR_LSB_REG);
+ mv_gop110_xpcs_global_print(gop, "TX_PCKTS_CNTR_MSB",
+ MV_XPCS_TX_PCKTS_CNTR_MSB_REG);
+}
+EXPORT_SYMBOL(mv_gop110_xpcs_gl_regs_dump);
+
+/* Dump the per-lane XPCS registers for @lane to the kernel log */
+void mv_gop110_xpcs_lane_regs_dump(struct gop_hw *gop, int lane)
+{
+ pr_info("\nXPCS Lane #%d registers]\n", lane);
+ mv_gop110_xpcs_lane_print(gop, "LANE_CFG_0", lane,
+ MV_XPCS_LANE_CFG_0_REG);
+ mv_gop110_xpcs_lane_print(gop, "LANE_CFG_1", lane,
+ MV_XPCS_LANE_CFG_1_REG);
+ mv_gop110_xpcs_lane_print(gop, "LANE_STATUS", lane,
+ MV_XPCS_LANE_STATUS_REG);
+ mv_gop110_xpcs_lane_print(gop, "SYMBOL_ERR_CNTR", lane,
+ MV_XPCS_SYMBOL_ERR_CNTR_REG);
+ mv_gop110_xpcs_lane_print(gop, "DISPARITY_ERR_CNTR", lane,
+ MV_XPCS_DISPARITY_ERR_CNTR_REG);
+ mv_gop110_xpcs_lane_print(gop, "PRBS_ERR_CNTR", lane,
+ MV_XPCS_PRBS_ERR_CNTR_REG);
+ mv_gop110_xpcs_lane_print(gop, "RX_PCKTS_CNTR_LSB", lane,
+ MV_XPCS_RX_PCKTS_CNTR_LSB_REG);
+ mv_gop110_xpcs_lane_print(gop, "RX_PCKTS_CNTR_MSB", lane,
+ MV_XPCS_RX_PCKTS_CNTR_MSB_REG);
+ mv_gop110_xpcs_lane_print(gop, "RX_BAD_PCKTS_CNTR_LSB", lane,
+ MV_XPCS_RX_BAD_PCKTS_CNTR_LSB_REG);
+ mv_gop110_xpcs_lane_print(gop, "RX_BAD_PCKTS_CNTR_MSB", lane,
+ MV_XPCS_RX_BAD_PCKTS_CNTR_MSB_REG);
+ mv_gop110_xpcs_lane_print(gop, "CYCLIC_DATA_0", lane,
+ MV_XPCS_CYCLIC_DATA_0_REG);
+ mv_gop110_xpcs_lane_print(gop, "CYCLIC_DATA_1", lane,
+ MV_XPCS_CYCLIC_DATA_1_REG);
+ mv_gop110_xpcs_lane_print(gop, "CYCLIC_DATA_2", lane,
+ MV_XPCS_CYCLIC_DATA_2_REG);
+ mv_gop110_xpcs_lane_print(gop, "CYCLIC_DATA_3", lane,
+ MV_XPCS_CYCLIC_DATA_3_REG);
+}
+EXPORT_SYMBOL(mv_gop110_xpcs_lane_regs_dump);
+
+/* Set PCS to reset or exit from reset.
+ * NOTE(review): the PCSRESET bit is cleared to enter reset and set to
+ * leave it, i.e. it appears to be an active-low "PCS running" bit —
+ * confirm against the XPCS register spec.
+ */
+int mv_gop110_xpcs_reset(struct gop_hw *gop, enum mv_reset reset)
+{
+ u32 reg_addr;
+ u32 val;
+
+ reg_addr = MV_XPCS_GLOBAL_CFG_0_REG;
+
+ /* read - modify - write */
+ val = mv_gop110_xpcs_global_read(gop, reg_addr);
+ if (reset == RESET)
+ val &= ~MV_XPCS_GLOBAL_CFG_0_PCSRESET_MASK;
+ else
+ val |= MV_XPCS_GLOBAL_CFG_0_PCSRESET_MASK;
+ mv_gop110_xpcs_global_write(gop, reg_addr, val);
+
+ return 0;
+}
+
+/* Set the internal mux's to the required PCS in the PI.
+ * num_of_lanes maps to a lane-active code: 1 -> 0, 2 -> 1, 4 -> 2;
+ * the field is then written as 2 * code.
+ */
+int mv_gop110_xpcs_mode(struct gop_hw *gop, int num_of_lanes)
+{
+ u32 reg_addr;
+ u32 val;
+ int lane;
+
+ switch (num_of_lanes) {
+ case 1:
+ lane = 0;
+ break;
+ case 2:
+ lane = 1;
+ break;
+ case 4:
+ lane = 2;
+ break;
+ default:
+ return -1;
+ }
+
+ /* configure XG MAC mode */
+ reg_addr = MV_XPCS_GLOBAL_CFG_0_REG;
+ val = mv_gop110_xpcs_global_read(gop, reg_addr);
+ /* NOTE(review): the explicit clear below is redundant if
+  * U32_SET_FIELD already clears the field before setting — verify
+  */
+ val &= ~MV_XPCS_GLOBAL_CFG_0_PCSMODE_MASK;
+ U32_SET_FIELD(val, MV_XPCS_GLOBAL_CFG_0_PCSMODE_MASK, 0);
+ U32_SET_FIELD(val, MV_XPCS_GLOBAL_CFG_0_LANEACTIVE_MASK, (2 * lane) <<
+ MV_XPCS_GLOBAL_CFG_0_LANEACTIVE_OFFS);
+ mv_gop110_xpcs_global_write(gop, reg_addr, val);
+
+ return 0;
+}
+
+/* Configure the MPCS: disable forward error correction, program the
+ * clock division ratio, then release the MAC and RX/TX serdes clock
+ * resets.
+ */
+int mv_gop110_mpcs_mode(struct gop_hw *gop)
+{
+ u32 reg_addr;
+ u32 val;
+
+ /* configure PCS40G COMMON CONTROL */
+ reg_addr = PCS40G_COMMON_CONTROL;
+ val = mv_gop110_mpcs_global_read(gop, reg_addr);
+ U32_SET_FIELD(val, FORWARD_ERROR_CORRECTION_MASK,
+ 0 << FORWARD_ERROR_CORRECTION_OFFSET);
+
+ mv_gop110_mpcs_global_write(gop, reg_addr, val);
+
+ /* configure PCS CLOCK RESET */
+ reg_addr = PCS_CLOCK_RESET;
+ val = mv_gop110_mpcs_global_read(gop, reg_addr);
+ U32_SET_FIELD(val, CLK_DIVISION_RATIO_MASK,
+ 1 << CLK_DIVISION_RATIO_OFFSET);
+
+ /* NOTE(review): the register is written twice — first with only the
+  * divider updated, then again with the reset bits set; presumably the
+  * divider must latch before the resets are released — confirm against
+  * the MPCS spec.
+  */
+ mv_gop110_mpcs_global_write(gop, reg_addr, val);
+
+ U32_SET_FIELD(val, CLK_DIV_PHASE_SET_MASK,
+ 0 << CLK_DIV_PHASE_SET_OFFSET);
+ U32_SET_FIELD(val, MAC_CLK_RESET_MASK, 1 << MAC_CLK_RESET_OFFSET);
+ U32_SET_FIELD(val, RX_SD_CLK_RESET_MASK, 1 << RX_SD_CLK_RESET_OFFSET);
+ U32_SET_FIELD(val, TX_SD_CLK_RESET_MASK, 1 << TX_SD_CLK_RESET_OFFSET);
+
+ mv_gop110_mpcs_global_write(gop, reg_addr, val);
+
+ return 0;
+}
+
+/* Write the MAC/RX/TX serdes clock reset bits of PCS_CLOCK_RESET:
+ * 0 when reset is requested, 1 to release.
+ */
+void mv_gop110_mpcs_clock_reset(struct gop_hw *gop, enum mv_reset reset)
+{
+ u32 bit = (reset == RESET) ? 0x0 : 0x1;
+ u32 val;
+
+ /* configure PCS CLOCK RESET */
+ val = mv_gop110_mpcs_global_read(gop, PCS_CLOCK_RESET);
+
+ U32_SET_FIELD(val, MAC_CLK_RESET_MASK, bit << MAC_CLK_RESET_OFFSET);
+ U32_SET_FIELD(val, RX_SD_CLK_RESET_MASK, bit << RX_SD_CLK_RESET_OFFSET);
+ U32_SET_FIELD(val, TX_SD_CLK_RESET_MASK, bit << TX_SD_CLK_RESET_OFFSET);
+
+ mv_gop110_mpcs_global_write(gop, PCS_CLOCK_RESET, val);
+}
+
+/* Read a MIB counter; the two "good octets" counters are 64-bit and
+ * require the high word from the register 4 bytes above the low one.
+ */
+u64 mv_gop110_mib_read64(struct gop_hw *gop, int port, unsigned int offset)
+{
+ bool wide = (offset == MV_MIB_GOOD_OCTETS_RECEIVED_LOW ||
+ offset == MV_MIB_GOOD_OCTETS_SENT_LOW);
+ u64 val;
+
+ val = mv_gop110_xmib_mac_read(gop, port, offset);
+ if (wide)
+ val += (u64)mv_gop110_xmib_mac_read(gop, port, offset + 4) << 32;
+
+ return val;
+}
+
+/* Pretty-print one MIB counter as "name: 0x<offset> = <value>".
+ * The counter is u64, so %llu is the matching specifier; the previous
+ * %lld mis-rendered values above INT64_MAX as negative.
+ */
+static void mv_gop110_mib_print(struct gop_hw *gop, int port, u32 offset,
+ char *mib_name)
+{
+ u64 val;
+
+ val = mv_gop110_mib_read64(gop, port, offset);
+ pr_info(" %-32s: 0x%02x = %llu\n", mib_name, offset, val);
+}
+
+/* Print the whole MIB counter bank for @port, grouped by category.
+ * The final LATE_COLLISION read must come last — it clears the bank.
+ */
+void mv_gop110_mib_counters_show(struct gop_hw *gop, int port)
+{
+ pr_info("\n[Rx]\n");
+ mv_gop110_mib_print(gop, port, MV_MIB_GOOD_OCTETS_RECEIVED_LOW,
+ "GOOD_OCTETS_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_BAD_OCTETS_RECEIVED,
+ "BAD_OCTETS_RECEIVED");
+
+ mv_gop110_mib_print(gop, port, MV_MIB_UNICAST_FRAMES_RECEIVED,
+ "UNCAST_FRAMES_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_BROADCAST_FRAMES_RECEIVED,
+ "BROADCAST_FRAMES_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_MULTICAST_FRAMES_RECEIVED,
+ "MULTICAST_FRAMES_RECEIVED");
+
+ pr_info("\n[RMON]\n");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAMES_64_OCTETS,
+ "FRAMES_64_OCTETS");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAMES_65_TO_127_OCTETS,
+ "FRAMES_65_TO_127_OCTETS");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAMES_128_TO_255_OCTETS,
+ "FRAMES_128_TO_255_OCTETS");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAMES_256_TO_511_OCTETS,
+ "FRAMES_256_TO_511_OCTETS");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAMES_512_TO_1023_OCTETS,
+ "FRAMES_512_TO_1023_OCTETS");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAMES_1024_TO_MAX_OCTETS,
+ "FRAMES_1024_TO_MAX_OCTETS");
+
+ pr_info("\n[Tx]\n");
+ mv_gop110_mib_print(gop, port, MV_MIB_GOOD_OCTETS_SENT_LOW,
+ "GOOD_OCTETS_SENT");
+ mv_gop110_mib_print(gop, port, MV_MIB_UNICAST_FRAMES_SENT,
+ "UNICAST_FRAMES_SENT");
+ mv_gop110_mib_print(gop, port, MV_MIB_MULTICAST_FRAMES_SENT,
+ "MULTICAST_FRAMES_SENT");
+ mv_gop110_mib_print(gop, port, MV_MIB_BROADCAST_FRAMES_SENT,
+ "BROADCAST_FRAMES_SENT");
+ mv_gop110_mib_print(gop, port, MV_MIB_CRC_ERRORS_SENT,
+ "CRC_ERRORS_SENT");
+
+ pr_info("\n[FC control]\n");
+ mv_gop110_mib_print(gop, port, MV_MIB_FC_RECEIVED,
+ "FC_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_FC_SENT,
+ "FC_SENT");
+
+ pr_info("\n[Errors]\n");
+ mv_gop110_mib_print(gop, port, MV_MIB_RX_FIFO_OVERRUN,
+ "RX_FIFO_OVERRUN");
+ mv_gop110_mib_print(gop, port, MV_MIB_UNDERSIZE_RECEIVED,
+ "UNDERSIZE_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_FRAGMENTS_RECEIVED,
+ "FRAGMENTS_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_OVERSIZE_RECEIVED,
+ "OVERSIZE_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_JABBER_RECEIVED,
+ "JABBER_RECEIVED");
+ mv_gop110_mib_print(gop, port, MV_MIB_MAC_RECEIVE_ERROR,
+ "MAC_RECEIVE_ERROR");
+ mv_gop110_mib_print(gop, port, MV_MIB_BAD_CRC_EVENT,
+ "BAD_CRC_EVENT");
+ mv_gop110_mib_print(gop, port, MV_MIB_COLLISION,
+ "COLLISION");
+ /* This counter must be read last; reading it clears all counters */
+ mv_gop110_mib_print(gop, port, MV_MIB_LATE_COLLISION,
+ "LATE_COLLISION");
+}
+EXPORT_SYMBOL(mv_gop110_mib_counters_show);
+
+/* Accumulate the port's MIB counters into @gop_statistics. The final
+ * LATE_COLLISION read must come last — reading it clears all counters,
+ * which is why every counter is added exactly once per call.
+ */
+void mv_gop110_mib_counters_stat_update(struct gop_hw *gop, int port, struct gop_stat *gop_statistics)
+{
+ u64 val;
+
+ gop_statistics->rx_byte += mv_gop110_mib_read64(gop, port,
+ MV_MIB_GOOD_OCTETS_RECEIVED_LOW);
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_UNICAST_FRAMES_RECEIVED);
+ gop_statistics->rx_unicast += val;
+ gop_statistics->rx_frames += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_BROADCAST_FRAMES_RECEIVED);
+ gop_statistics->rx_bcast += val;
+ gop_statistics->rx_frames += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_MULTICAST_FRAMES_RECEIVED);
+ gop_statistics->rx_mcast += val;
+ gop_statistics->rx_frames += val;
+
+ gop_statistics->tx_byte += mv_gop110_mib_read64(gop, port,
+ MV_MIB_GOOD_OCTETS_SENT_LOW);
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_UNICAST_FRAMES_SENT);
+ gop_statistics->tx_unicast += val;
+ gop_statistics->tx_frames += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_MULTICAST_FRAMES_SENT);
+ gop_statistics->tx_mcast += val;
+ gop_statistics->tx_frames += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_BROADCAST_FRAMES_SENT);
+ gop_statistics->tx_bcast += val;
+ gop_statistics->tx_frames += val;
+
+ gop_statistics->tx_crc_sent += mv_gop110_mib_read64(gop, port,
+ MV_MIB_CRC_ERRORS_SENT);
+
+ gop_statistics->rx_pause += mv_gop110_mib_read64(gop, port,
+ MV_MIB_FC_RECEIVED);
+
+ gop_statistics->tx_pause += mv_gop110_mib_read64(gop, port,
+ MV_MIB_FC_SENT);
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_RX_FIFO_OVERRUN);
+ gop_statistics->rx_overrun += val;
+ gop_statistics->rx_total_err += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_UNDERSIZE_RECEIVED);
+ gop_statistics->rx_runt += val;
+ gop_statistics->rx_total_err += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_FRAGMENTS_RECEIVED);
+ gop_statistics->rx_fragments_err += val;
+ gop_statistics->rx_total_err += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_OVERSIZE_RECEIVED);
+ gop_statistics->rx_giant += val;
+ gop_statistics->rx_total_err += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_JABBER_RECEIVED);
+ gop_statistics->rx_jabber += val;
+ gop_statistics->rx_total_err += val;
+
+ /* NOTE(review): MAC_RECEIVE_ERROR is folded into rx_jabber, the same
+  * field as JABBER_RECEIVED above — looks like a copy/paste slip;
+  * confirm whether a dedicated field (e.g. rx_mac_err) was intended
+  */
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_MAC_RECEIVE_ERROR);
+ gop_statistics->rx_jabber += val;
+ gop_statistics->rx_total_err += val;
+
+ val = mv_gop110_mib_read64(gop, port, MV_MIB_BAD_CRC_EVENT);
+ gop_statistics->rx_crc += val;
+ gop_statistics->rx_total_err += val;
+
+ gop_statistics->collision += mv_gop110_mib_read64(gop, port,
+ MV_MIB_COLLISION);
+
+ /* This counter must be read last; reading it clears all counters */
+ gop_statistics->late_collision += mv_gop110_mib_read64(gop, port,
+ MV_MIB_LATE_COLLISION);
+}
+
+/* Write the per-port active bit in NETCOMP PORTS_CONTROL_1 */
+void mv_gop110_netc_active_port(struct gop_hw *gop, u32 port, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_PORTS_CONTROL_1);
+ reg = (reg & ~NETC_PORTS_ACTIVE_MASK(port)) |
+ ((val << NETC_PORTS_ACTIVE_OFFSET(port)) &
+ NETC_PORTS_ACTIVE_MASK(port));
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_PORTS_CONTROL_1, reg);
+}
+
+/* Enable/disable the XAUI lanes in SERDES1 control ('port' is unused) */
+static void mv_gop110_netc_xaui_enable(struct gop_hw *gop, u32 port, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, SD1_CONTROL_1_REG);
+ reg &= ~SD1_CONTROL_XAUI_EN_MASK;
+
+ val <<= SD1_CONTROL_XAUI_EN_OFFSET;
+ val &= SD1_CONTROL_XAUI_EN_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, SD1_CONTROL_1_REG, reg);
+}
+
+/* Enable/disable RXAUI0 on serdes lanes 2/3 ('port' is unused) */
+static void mv_gop110_netc_rxaui0_enable(struct gop_hw *gop, u32 port, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, SD1_CONTROL_1_REG);
+ reg &= ~SD1_CONTROL_RXAUI0_L23_EN_MASK;
+
+ val <<= SD1_CONTROL_RXAUI0_L23_EN_OFFSET;
+ val &= SD1_CONTROL_RXAUI0_L23_EN_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, SD1_CONTROL_1_REG, reg);
+}
+
+/* Enable/disable RXAUI1 on serdes lanes 4/5 ('port' is unused) */
+static void mv_gop110_netc_rxaui1_enable(struct gop_hw *gop, u32 port, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, SD1_CONTROL_1_REG);
+ reg &= ~SD1_CONTROL_RXAUI1_L45_EN_MASK;
+
+ val <<= SD1_CONTROL_RXAUI1_L45_EN_OFFSET;
+ val &= SD1_CONTROL_RXAUI1_L45_EN_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, SD1_CONTROL_1_REG, reg);
+}
+
+/* Select the GbE MII mode.
+ * NOTE(review): only the PORT1 MII-mode field is written regardless of
+ * 'port' (callers pass port 3) — confirm the field covers that port.
+ */
+static void mv_gop110_netc_mii_mode(struct gop_hw *gop, u32 port, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_CONTROL_0);
+ reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;
+
+ val <<= NETC_GBE_PORT1_MII_MODE_OFFSET;
+ val &= NETC_GBE_PORT1_MII_MODE_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_CONTROL_0, reg);
+}
+
+/* Write the GOP soft-reset bit (callers pass 1 to de-assert the reset) */
+static void mv_gop110_netc_gop_reset(struct gop_hw *gop, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_GOP_SOFT_RESET_1_REG);
+ reg &= ~NETC_GOP_SOFT_RESET_MASK;
+
+ val <<= NETC_GOP_SOFT_RESET_OFFSET;
+ val &= NETC_GOP_SOFT_RESET_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, MV_GOP_SOFT_RESET_1_REG, reg);
+}
+
+/* Write the GOP clock-divider phase field in PORTS_CONTROL_0 */
+static void mv_gop110_netc_gop_clock_logic_set(struct gop_hw *gop, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_PORTS_CONTROL_0);
+ reg &= ~NETC_CLK_DIV_PHASE_MASK;
+
+ val <<= NETC_CLK_DIV_PHASE_OFFSET;
+ val &= NETC_CLK_DIV_PHASE_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_PORTS_CONTROL_0, reg);
+}
+
+/* Write the per-port gigabit RF (HB) reset bit (1 de-asserts the reset) */
+static void mv_gop110_netc_port_rf_reset(struct gop_hw *gop, u32 port, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_PORTS_CONTROL_1);
+ reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(port));
+
+ val <<= NETC_PORT_GIG_RF_RESET_OFFSET(port);
+ val &= NETC_PORT_GIG_RF_RESET_MASK(port);
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_PORTS_CONTROL_1, reg);
+}
+
+/* Write the SGMII-mode field for a GbE port: port 2 uses the PORT0
+ * field of NETCOMP_CONTROL_0, any other port the PORT1 field.
+ */
+static void mv_gop110_netc_gbe_sgmii_mode_select(struct gop_hw *gop, u32 port,
+ u32 val)
+{
+ u32 reg, mask, offset;
+
+ if (port == 2) {
+ mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
+ offset = NETC_GBE_PORT0_SGMII_MODE_OFFSET;
+ } else {
+ mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
+ offset = NETC_GBE_PORT1_SGMII_MODE_OFFSET;
+ }
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_CONTROL_0);
+ reg = (reg & ~mask) | ((val << offset) & mask);
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_CONTROL_0, reg);
+}
+
+/* Select the NETC bus width (callers pass 1 for HB mode) */
+static void mv_gop110_netc_bus_width_select(struct gop_hw *gop, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_PORTS_CONTROL_0);
+ reg &= ~NETC_BUS_WIDTH_SELECT_MASK;
+
+ val <<= NETC_BUS_WIDTH_SELECT_OFFSET;
+ val &= NETC_BUS_WIDTH_SELECT_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_PORTS_CONTROL_0, reg);
+}
+
+/* Configure the gigabit RX data sample-stage timing field */
+static void mv_gop110_netc_sample_stages_timing(struct gop_hw *gop, u32 val)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_PORTS_CONTROL_0);
+ reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;
+
+ val <<= NETC_GIG_RX_DATA_SAMPLE_OFFSET;
+ val &= NETC_GIG_RX_DATA_SAMPLE_MASK;
+
+ reg |= val;
+
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_PORTS_CONTROL_0, reg);
+}
+
+/* Route a GbE MAC to the XMII path: phase 1 configures bus width and
+ * interface mode, phase 2 releases the port's HB reset.
+ */
+static void mv_gop110_netc_mac_to_xgmii(struct gop_hw *gop, u32 port,
+ enum mv_netc_phase phase)
+{
+ switch (phase) {
+ case MV_NETC_FIRST_PHASE:
+ /* Set Bus Width to HB mode = 1 */
+ mv_gop110_netc_bus_width_select(gop, 1);
+ /* Select XMII mode (the value written is MV_NETC_GBE_XMII;
+  * the original comment said "RGMII")
+  */
+ mv_gop110_netc_gbe_sgmii_mode_select(gop, port,
+ MV_NETC_GBE_XMII);
+ break;
+ case MV_NETC_SECOND_PHASE:
+ /* De-assert the relevant port HB reset */
+ mv_gop110_netc_port_rf_reset(gop, port, 1);
+ break;
+ }
+}
+
+/* Route a GbE MAC to the SGMII path: phase 1 configures bus width,
+ * interface mode (ports >= 1 only) and sample-stage timing; phase 2
+ * releases the port's HB reset.
+ */
+static void mv_gop110_netc_mac_to_sgmii(struct gop_hw *gop, u32 port,
+ enum mv_netc_phase phase)
+{
+ switch (phase) {
+ case MV_NETC_FIRST_PHASE:
+ /* Set Bus Width to HB mode = 1 */
+ mv_gop110_netc_bus_width_select(gop, 1);
+ /* Select SGMII mode */
+ if (port >= 1)
+ mv_gop110_netc_gbe_sgmii_mode_select(gop, port,
+ MV_NETC_GBE_SGMII);
+
+ /* Configure the sample stages */
+ mv_gop110_netc_sample_stages_timing(gop, 0);
+ /* Configure the ComPhy Selector */
+ /* mv_gop110_netc_com_phy_selector_config(netComplex); */
+ break;
+ case MV_NETC_SECOND_PHASE:
+ /* De-assert the relevant port HB reset */
+ mv_gop110_netc_port_rf_reset(gop, port, 1);
+ break;
+ }
+}
+
+/* Route MAC0 to an RXAUI serdes pair; @lanes chooses lanes 2/3 or 4/5.
+ * Only port 0 is supported — other ports are silently ignored.
+ */
+static void mv_gop110_netc_mac_to_rxaui(struct gop_hw *gop, u32 port,
+ enum mv_netc_phase phase,
+ enum mv_netc_lanes lanes)
+{
+ /* Currently only RXAUI0 supported */
+ if (port != 0)
+ return;
+
+ switch (phase) {
+ case MV_NETC_FIRST_PHASE:
+ /* RXAUI Serdes/s Clock alignment */
+ if (lanes == MV_NETC_LANE_23)
+ mv_gop110_netc_rxaui0_enable(gop, port, 1);
+ else
+ mv_gop110_netc_rxaui1_enable(gop, port, 1);
+ break;
+ case MV_NETC_SECOND_PHASE:
+ /* De-assert the relevant port HB reset */
+ mv_gop110_netc_port_rf_reset(gop, port, 1);
+ break;
+ }
+}
+
+/* Route a MAC to the XAUI serdes: phase 1 enables the XAUI lanes,
+ * phase 2 releases the port's HB reset.
+ */
+static void mv_gop110_netc_mac_to_xaui(struct gop_hw *gop, u32 port,
+ enum mv_netc_phase phase)
+{
+ switch (phase) {
+ case MV_NETC_FIRST_PHASE:
+ /* XAUI Serdes clock alignment (comment fixed: was "RXAUI") */
+ mv_gop110_netc_xaui_enable(gop, port, 1);
+ break;
+ case MV_NETC_SECOND_PHASE:
+ /* De-assert the relevant port HB reset */
+ mv_gop110_netc_port_rf_reset(gop, port, 1);
+ break;
+ }
+}
+
+/* Apply the network-complex configuration bitmap for the given phase.
+ * Phase 1 sets up MAC-to-serdes routing; phase 2 additionally releases
+ * resets and enables the GOP clock logic. GOP ports 0, 2 and 3 are
+ * activated unconditionally (port 1 is never touched here).
+ * Always returns 0.
+ */
+int mv_gop110_netc_init(struct gop_hw *gop,
+ u32 net_comp_config, enum mv_netc_phase phase)
+{
+ u32 c = net_comp_config;
+
+ if (c & MV_NETC_GE_MAC0_RXAUI_L23)
+ mv_gop110_netc_mac_to_rxaui(gop, 0, phase, MV_NETC_LANE_23);
+
+ if (c & MV_NETC_GE_MAC0_RXAUI_L45)
+ mv_gop110_netc_mac_to_rxaui(gop, 0, phase, MV_NETC_LANE_45);
+
+ if (c & MV_NETC_GE_MAC0_XAUI)
+ mv_gop110_netc_mac_to_xaui(gop, 0, phase);
+
+ if (c & MV_NETC_GE_MAC2_SGMII)
+ mv_gop110_netc_mac_to_sgmii(gop, 2, phase);
+ else
+ mv_gop110_netc_mac_to_xgmii(gop, 2, phase);
+ if (c & MV_NETC_GE_MAC3_SGMII) {
+ mv_gop110_netc_mac_to_sgmii(gop, 3, phase);
+ } else {
+ mv_gop110_netc_mac_to_xgmii(gop, 3, phase);
+ if (c & MV_NETC_GE_MAC3_RGMII)
+ mv_gop110_netc_mii_mode(gop, 3, MV_NETC_GBE_RGMII);
+ else
+ mv_gop110_netc_mii_mode(gop, 3, MV_NETC_GBE_MII);
+ }
+
+ /* Activate gop ports 0, 2, 3 */
+ mv_gop110_netc_active_port(gop, 0, 1);
+ mv_gop110_netc_active_port(gop, 2, 1);
+ mv_gop110_netc_active_port(gop, 3, 1);
+
+ if (phase == MV_NETC_SECOND_PHASE) {
+ /* Enable the GOP internal clock logic */
+ mv_gop110_netc_gop_clock_logic_set(gop, 1);
+ /* De-assert GOP unit reset */
+ mv_gop110_netc_gop_reset(gop, 1);
+ }
+ return 0;
+}
+
+/* Enable/disable the XON pause bit for one GOP port in PORTS_CONTROL_0.
+ * Port 1 has no pause field and is rejected with an error log (the
+ * unmodified register is still written back).
+ */
+void mv_gop110_netc_xon_set(struct gop_hw *gop, enum mv_gop_port port, bool en)
+{
+ u32 reg;
+
+ reg = mv_gop110_rfu1_read(gop, MV_NETCOMP_PORTS_CONTROL_0);
+
+ switch (port) {
+ case MV_GOP_PORT0:
+ U32_SET_FIELD(reg, NETC_PORT0_PAUSE_MASK,
+ en << NETC_PORT0_PAUSE_OFFSET);
+ break;
+ case MV_GOP_PORT1:
+ pr_err("%s: Wrong gop port (%d)\n", __func__, port);
+ break;
+ case MV_GOP_PORT2:
+ U32_SET_FIELD(reg, NETC_PORT2_PAUSE_MASK,
+ en << NETC_PORT2_PAUSE_OFFSET);
+ break;
+ case MV_GOP_PORT3:
+ U32_SET_FIELD(reg, NETC_PORT3_PAUSE_MASK,
+ en << NETC_PORT3_PAUSE_OFFSET);
+ break;
+ }
+
+ mv_gop110_rfu1_write(gop, MV_NETCOMP_PORTS_CONTROL_0, reg);
+}
+EXPORT_SYMBOL(mv_gop110_netc_xon_set);
+
+/* Enable/disable periodic FC-frame transmission; also pins the FCA
+ * port type to type B.
+ */
+void mv_gop110_fca_send_periodic(struct gop_hw *gop, int mac_num, bool en)
+{
+ /* u32 (was int): FCA_CONTROL is a hardware register; keep the bit
+  * manipulation on an unsigned type
+  */
+ u32 val;
+
+ val = mv_gop110_fca_read(gop, mac_num, FCA_CONTROL_REG);
+
+ U32_SET_FIELD(val, FCA_PORT_TYPE_MASK,
+ FCA_PORT_TYPE_B << FCA_PORT_TYPE_OFFSET);
+ U32_SET_FIELD(val, FCA_SEND_PERIODIC_MASK,
+ en << FCA_SEND_PERIODIC_OFFSET);
+ mv_gop110_fca_write(gop, mac_num, FCA_CONTROL_REG, val);
+}
+
+/* Enable/disable the FCA periodic timer */
+void mv_gop110_fca_enable_periodic(struct gop_hw *gop, int mac_num, bool en)
+{
+ /* u32 (was int): register value, keep bit ops unsigned */
+ u32 val;
+
+ val = mv_gop110_fca_read(gop, mac_num, FCA_CONTROL_REG);
+
+ U32_SET_FIELD(val, FCA_ENABLE_PERIODIC_MASK,
+ en << FCA_ENABLE_PERIODIC_OFFSET);
+ mv_gop110_fca_write(gop, mac_num, FCA_CONTROL_REG, val);
+}
+
+/* Program the 64-bit FCA periodic counter.
+ * NOTE(review): the LSB is written before the MSB — confirm whether
+ * the hardware latches the full value on the MSB write.
+ */
+void mv_gop110_fca_set_timer(struct gop_hw *gop, int mac_num, u32 lsb, u32 msb)
+{
+ mv_gop110_fca_write(gop, mac_num, PERIODIC_COUNTER_LSB_REG, lsb);
+ mv_gop110_fca_write(gop, mac_num, PERIODIC_COUNTER_MSB_REG, msb);
+}
+
+/* Reprogram the periodic FC timer: periodic sending is stopped while
+ * the 64-bit counter is rewritten, then both controls are re-enabled.
+ */
+void mv_gop110_fca_set_periodic_timer(struct gop_hw *gop, int mac_num, u64 timer)
+{
+ u32 lsb, msb;
+
+ mv_gop110_fca_send_periodic(gop, mac_num, false);
+ mv_gop110_fca_enable_periodic(gop, mac_num, false);
+
+ lsb = lower_32_bits(timer);
+ msb = upper_32_bits(timer);
+
+ mv_gop110_fca_set_timer(gop, mac_num, lsb, msb);
+
+ mv_gop110_fca_send_periodic(gop, mac_num, true);
+ mv_gop110_fca_enable_periodic(gop, mac_num, true);
+}
+EXPORT_SYMBOL(mv_gop110_fca_set_periodic_timer);
+
+/* Set the FCA bypass bit from @en, pinning the port type to type B.
+ * NOTE(review): en == true sets BYPASS, which
+ * mv_gop110_check_fca_tx_state reports as FCA inactive — confirm the
+ * intended polarity of "tx_enable".
+ */
+void mv_gop110_fca_tx_enable(struct gop_hw *gop, int mac_num, bool en)
+{
+ /* u32 (was int): register value, keep bit ops unsigned */
+ u32 val;
+
+ val = mv_gop110_fca_read(gop, mac_num, FCA_CONTROL_REG);
+
+ U32_SET_FIELD(val, FCA_PORT_TYPE_MASK,
+ FCA_PORT_TYPE_B << FCA_PORT_TYPE_OFFSET);
+ U32_SET_FIELD(val, FCA_BYPASS_MASK,
+ en << FCA_BYPASS_OFFSET);
+ mv_gop110_fca_write(gop, mac_num, FCA_CONTROL_REG, val);
+}
+
+/* Report whether the FCA is active: true only when the bypass bit in
+ * the FCA control register is clear.
+ */
+bool mv_gop110_check_fca_tx_state(struct gop_hw *gop, int mac_num)
+{
+ /* u32 (was int): register value, keep bit test unsigned */
+ u32 val;
+
+ val = mv_gop110_fca_read(gop, mac_num, FCA_CONTROL_REG);
+
+ return !(val & FCA_BYPASS_MASK);
+}
+
+/* Register dump for ethtool: read the GMAC register set into
+ * @regs_buff in a fixed order (must match the ethtool regs length).
+ */
+void mv_gop110_gmac_registers_dump(struct mv_pp2x_port *port, u32 *regs_buff)
+{
+ struct gop_hw *gop = &port->priv->hw.gop;
+ int gop_index = port->mac_data.gop_index;
+ static const u32 head_regs[] = {
+ MV_GMAC_PORT_CTRL0_REG,
+ MV_GMAC_PORT_CTRL1_REG,
+ MV_GMAC_PORT_CTRL2_REG,
+ MV_GMAC_PORT_AUTO_NEG_CFG_REG,
+ MV_GMAC_PORT_STATUS0_REG,
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_REG,
+ MV_GMAC_PORT_FIFO_CFG_0_REG,
+ MV_GMAC_PORT_FIFO_CFG_1_REG,
+ MV_GMAC_INTERRUPT_CAUSE_REG,
+ MV_GMAC_INTERRUPT_MASK_REG,
+ MV_GMAC_PORT_SERDES_CFG0_REG,
+ MV_GMAC_PORT_SERDES_CFG1_REG,
+ MV_GMAC_PORT_SERDES_CFG2_REG,
+ MV_GMAC_PORT_SERDES_CFG3_REG,
+ MV_GMAC_PORT_PRBS_STATUS_REG,
+ MV_GMAC_PORT_PRBS_ERR_CNTR_REG,
+ MV_GMAC_PORT_STATUS1_REG,
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_REG,
+ MV_GMAC_PORT_CTRL3_REG,
+ MV_GMAC_QSGMII_REG,
+ MV_GMAC_QSGMII_STATUS_REG,
+ MV_GMAC_QSGMII_PRBS_CNTR_REG,
+ };
+ static const u32 tail_regs[] = {
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0,
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1,
+ MV_GMAC_PORT_CTRL4_REG,
+ MV_GMAC_PORT_SERIAL_PARAM_1_CFG_REG,
+ MV_GMAC_INTERRUPT_SUM_CAUSE_REG,
+ MV_GMAC_INTERRUPT_SUM_MASK_REG,
+ MV_GMAC_LPI_CTRL_0_REG,
+ MV_GMAC_LPI_CTRL_1_REG,
+ MV_GMAC_LPI_CTRL_2_REG,
+ MV_GMAC_LPI_STATUS_REG,
+ MV_GMAC_LPI_CNTR_REG,
+ MV_GMAC_PULSE_1_MS_LOW_REG,
+ MV_GMAC_PULSE_1_MS_HIGH_REG,
+ };
+ int i;
+ int index = 0;
+
+ for (i = 0; i < (int)(sizeof(head_regs) / sizeof(head_regs[0])); i++)
+ regs_buff[index++] = mv_gop110_gmac_read(gop, gop_index,
+ head_regs[i]);
+
+ /* the CCFC speed timers and DSA tag registers are indexed macros,
+  * so they cannot live in the constant tables above
+  */
+ for (i = 0; i < 8; i++)
+ regs_buff[index++] = mv_gop110_gmac_read(gop, gop_index,
+ MV_GMAC_CCFC_PORT_SPEED_TIMER_REG(i));
+ for (i = 0; i < 4; i++)
+ regs_buff[index++] = mv_gop110_gmac_read(gop, gop_index,
+ MV_GMAC_FC_DSA_TAG_REG(i));
+
+ for (i = 0; i < (int)(sizeof(tail_regs) / sizeof(tail_regs[0])); i++)
+ regs_buff[index++] = mv_gop110_gmac_read(gop, gop_index,
+ tail_regs[i]);
+}
+
+void mv_gop110_xlg_registers_dump(struct mv_pp2x_port *port, u32 *regs_buff)
+{
+ int gop_port = port->mac_data.gop_index;
+ int index = 0;
+
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL0_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL1_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL2_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL2_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_PORT_STATUS_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_FIFOS_THRS_CFG_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_INTERRUPT_CAUSE_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_INTERRUPT_MASK_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL3_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_PER_PRIO_FLOW_CTRL_STATUS_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_DEBUG_BUS_STATUS_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_METAL_FIX_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MIB_CNTRS_CTRL_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_CNCCFC_TIMERI_REG(gop_port));
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_EXTERNAL_INTERRUPT_CAUSE_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_FC_DSA_TAG_0_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_FC_DSA_TAG_1_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_FC_DSA_TAG_2_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_FC_DSA_TAG_3_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_DIC_BUDGET_COMPENSATION_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL4_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_PORT_MAC_CTRL5_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_EXT_CTRL_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_MACRO_CTRL_REG);
+ regs_buff[index++] = mv_gop110_xlg_mac_read(&port->priv->hw.gop,
+ port->mac_data.gop_index,
+ MV_XLG_MAC_DIC_PPM_IPG_REDUCE_REG);
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.h b/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.h
new file mode 100644
index 000000000000..d77be0eb5e85
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw.h
@@ -0,0 +1,472 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#ifndef _MV_GOP_HW_H_
+#define _MV_GOP_HW_H_
+
+/* Sets the field specified by mask in data to val. */
+#define U32_SET_FIELD(data, mask, val) ((data) = (((data) & ~(mask)) | (val)))
+
+/* port related */
+enum mv_reset {RESET, UNRESET};
+
+enum mv_port_speed {
+ MV_PORT_SPEED_AN,
+ MV_PORT_SPEED_10,
+ MV_PORT_SPEED_100,
+ MV_PORT_SPEED_1000,
+ MV_PORT_SPEED_2500,
+ MV_PORT_SPEED_10000
+};
+
+enum mv_port_duplex {
+ MV_PORT_DUPLEX_AN,
+ MV_PORT_DUPLEX_HALF,
+ MV_PORT_DUPLEX_FULL
+};
+
+enum mv_port_fc {
+ MV_PORT_FC_AN_NO,
+ MV_PORT_FC_AN_SYM,
+ MV_PORT_FC_AN_ASYM,
+ MV_PORT_FC_DISABLE,
+ MV_PORT_FC_TX_DISABLE,
+ MV_PORT_FC_RX_DISABLE,
+ MV_PORT_FC_ENABLE,
+ MV_PORT_FC_TX_ENABLE,
+ MV_PORT_FC_RX_ENABLE,
+ MV_PORT_FC_ACTIVE
+};
+
+struct mv_port_link_status {
+ int linkup; /*flag*/
+ enum mv_port_speed speed;
+ enum mv_port_duplex duplex;
+ enum mv_port_fc rx_fc;
+ enum mv_port_fc tx_fc;
+ enum mv_port_fc autoneg_fc;
+};
+
+/* different loopback types can be configured on different levels:
+ * MAC, PCS, SERDES
+ */
+enum mv_lb_type {
+ MV_DISABLE_LB,
+ MV_RX_2_TX_LB,
+ MV_TX_2_RX_LB, /* on SERDES level - analog loopback */
+ MV_TX_2_RX_DIGITAL_LB /* on SERDES level - digital loopback */
+};
+
+enum sd_media_mode {MV_RXAUI, MV_XAUI};
+
+/* Net Complex */
+enum mv_netc_topology {
+ MV_NETC_GE_MAC0_RXAUI_L23 = BIT(0),
+ MV_NETC_GE_MAC0_RXAUI_L45 = BIT(1),
+ MV_NETC_GE_MAC0_XAUI = BIT(2),
+ MV_NETC_GE_MAC2_SGMII = BIT(3),
+ MV_NETC_GE_MAC3_SGMII = BIT(4),
+ MV_NETC_GE_MAC3_RGMII = BIT(5),
+};
+
+enum mv_netc_phase {
+ MV_NETC_FIRST_PHASE,
+ MV_NETC_SECOND_PHASE,
+};
+
+enum mv_netc_sgmii_xmi_mode {
+ MV_NETC_GBE_SGMII,
+ MV_NETC_GBE_XMII,
+};
+
+enum mv_netc_mii_mode {
+ MV_NETC_GBE_RGMII,
+ MV_NETC_GBE_MII,
+};
+
+enum mv_netc_lanes {
+ MV_NETC_LANE_23,
+ MV_NETC_LANE_45,
+};
+
+enum mv_gop_port {
+ MV_GOP_PORT0 = 0,
+ MV_GOP_PORT1 = 1,
+ MV_GOP_PORT2 = 2,
+ MV_GOP_PORT3 = 3,
+};
+
+#define MV_RGMII_TX_FIFO_MIN_TH (0x41)
+#define MV_SGMII_TX_FIFO_MIN_TH (0x5)
+#define MV_SGMII2_5_TX_FIFO_MIN_TH (0xB)
+
+static inline u32 mv_gop_gen_read(void __iomem *base, u32 offset)
+{
+ void *reg_ptr = base + offset;
+ u32 val;
+
+ val = readl(reg_ptr);
+ return val;
+}
+
+static inline void mv_gop_gen_write(void __iomem *base, u32 offset, u32 data)
+{
+ void *reg_ptr = base + offset;
+
+ writel(data, reg_ptr);
+}
+
+/* GOP port configuration functions */
+int mv_gop110_port_init(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_port_reset(struct gop_hw *gop, struct mv_mac_data *mac);
+void mv_gop110_port_enable(struct gop_hw *gop, struct mv_mac_data *mac);
+void mv_gop110_port_disable(struct gop_hw *gop, struct mv_mac_data *mac);
+void mv_gop110_port_periodic_xon_set(struct gop_hw *gop,
+ struct mv_mac_data *mac,
+ int enable);
+bool mv_gop110_port_is_link_up(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_port_link_status(struct gop_hw *gop, struct mv_mac_data *mac,
+ struct mv_port_link_status *pstatus);
+bool mv_gop110_port_autoneg_status(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_check_port_type(struct gop_hw *gop, int port_num);
+void mv_gop110_gmac_set_autoneg(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool auto_neg);
+int mv_gop110_port_regs(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_port_events_mask(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_port_events_unmask(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_port_events_clear(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_status_show(struct gop_hw *gop, struct mv_pp2x *pp2, int port_num);
+int mv_gop110_speed_duplex_get(struct gop_hw *gop, struct mv_mac_data *mac,
+ enum mv_port_speed *speed,
+ enum mv_port_duplex *duplex);
+int mv_gop110_speed_duplex_set(struct gop_hw *gop, struct mv_mac_data *mac,
+ enum mv_port_speed speed,
+ enum mv_port_duplex duplex);
+int mv_gop110_autoneg_restart(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_fl_cfg(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_force_link_mode_set(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool force_link_up,
+ bool force_link_down);
+int mv_gop110_force_link_mode_get(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool *force_link_up,
+ bool *force_link_down);
+int mv_gop110_loopback_set(struct gop_hw *gop, struct mv_mac_data *mac,
+ bool lb);
+void mv_gop_reg_print(char *reg_name, u32 reg);
+
+/* Gig PCS Functions */
+int mv_gop110_gpcs_mode_cfg(struct gop_hw *gop, int pcs_num, bool en);
+int mv_gop110_gpcs_reset(struct gop_hw *gop, int pcs_num, enum mv_reset act);
+
+/* MPCS Functions */
+
+static inline u32 mv_gop110_mpcs_global_read(struct gop_hw *gop, u32 offset)
+{
+ return mv_gop_gen_read(gop->gop_110.mspg_base, offset);
+}
+
+static inline void mv_gop110_mpcs_global_write(struct gop_hw *gop, u32 offset,
+ u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.mspg_base, offset, data);
+}
+
+static inline void mv_gop110_mpcs_global_print(struct gop_hw *gop,
+ char *reg_name, u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_mpcs_global_read(gop, reg));
+}
+
+/* XPCS Functions */
+
+static inline u32 mv_gop110_xpcs_global_read(struct gop_hw *gop, u32 offset)
+{
+ return mv_gop_gen_read(gop->gop_110.xpcs_base, offset);
+}
+
+static inline void mv_gop110_xpcs_global_write(struct gop_hw *gop, u32 offset,
+ u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.xpcs_base, offset, data);
+}
+
+static inline void mv_gop110_xpcs_global_print(struct gop_hw *gop,
+ char *reg_name, u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_xpcs_global_read(gop, reg));
+}
+
+static inline u32 mv_gop110_xpcs_lane_read(struct gop_hw *gop, int lane_num,
+ u32 offset)
+{
+ return mv_gop_gen_read(gop->gop_110.xpcs_base, offset);
+}
+
+static inline void mv_gop110_xpcs_lane_write(struct gop_hw *gop, int lane_num,
+ u32 offset, u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.xpcs_base, offset, data);
+}
+
+static inline void mv_gop110_xpcs_lane_print(struct gop_hw *gop,
+ char *reg_name,
+ int lane_num, u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_xpcs_lane_read(gop, lane_num, reg));
+}
+
+void mv_gop110_xpcs_gl_regs_dump(struct gop_hw *gop);
+void mv_gop110_xpcs_lane_regs_dump(struct gop_hw *gop, int lane);
+int mv_gop110_xpcs_reset(struct gop_hw *gop, enum mv_reset reset);
+int mv_gop110_xpcs_mode(struct gop_hw *gop, int num_of_lanes);
+int mv_gop110_mpcs_mode(struct gop_hw *gop);
+void mv_gop110_mpcs_clock_reset(struct gop_hw *gop, enum mv_reset reset);
+
+/* XLG MAC Functions */
+static inline u32 mv_gop110_xlg_mac_read(struct gop_hw *gop, int mac_num,
+ u32 offset)
+{
+ return(mv_gop_gen_read(gop->gop_110.xlg_mac.base,
+ mac_num * gop->gop_110.xlg_mac.obj_size + offset));
+}
+
+static inline void mv_gop110_xlg_mac_write(struct gop_hw *gop, int mac_num,
+ u32 offset, u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.xlg_mac.base,
+ mac_num * gop->gop_110.xlg_mac.obj_size + offset, data);
+}
+
+static inline void mv_gop110_xlg_mac_print(struct gop_hw *gop, char *reg_name,
+ int mac_num, u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_xlg_mac_read(gop, mac_num, reg));
+}
+
+/* MIB MAC Functions */
+static inline u32 mv_gop110_xmib_mac_read(struct gop_hw *gop, int mac_num,
+ u32 offset)
+{
+ return(mv_gop_gen_read(gop->gop_110.xmib.base,
+ mac_num * gop->gop_110.xmib.obj_size + offset));
+}
+
+static inline void mv_gop110_xmib_mac_write(struct gop_hw *gop, int mac_num,
+ u32 offset, u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.xmib.base,
+ mac_num * gop->gop_110.xmib.obj_size + offset, data);
+}
+
+static inline void mv_gop110_xmib_mac_print(struct gop_hw *gop, char *reg_name,
+ int mac_num, u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_xmib_mac_read(gop, mac_num, reg));
+}
+
+void mv_gop110_xlg_mac_regs_dump(struct gop_hw *gop, int port);
+int mv_gop110_xlg_mac_reset(struct gop_hw *gop, int mac_num,
+ enum mv_reset reset);
+int mv_gop110_xlg_mac_mode_cfg(struct gop_hw *gop, int mac_num,
+ int num_of_act_lanes);
+int mv_gop110_xlg_mac_loopback_cfg(struct gop_hw *gop, int mac_num,
+ enum mv_lb_type type);
+
+bool mv_gop110_xlg_mac_link_status_get(struct gop_hw *gop, int mac_num);
+void mv_gop110_xlg_mac_port_enable(struct gop_hw *gop, int mac_num);
+void mv_gop110_xlg_mac_port_disable(struct gop_hw *gop, int mac_num);
+void mv_gop110_xlg_mac_port_periodic_xon_set(struct gop_hw *gop,
+ int mac_num,
+ int enable);
+int mv_gop110_xlg_mac_link_status(struct gop_hw *gop, int mac_num,
+ struct mv_port_link_status *pstatus);
+int mv_gop110_xlg_mac_max_rx_size_set(struct gop_hw *gop, int mac_num,
+ int max_rx_size);
+int mv_gop110_xlg_mac_force_link_mode_set(struct gop_hw *gop, int mac_num,
+ bool force_link_up,
+ bool force_link_down);
+int mv_gop110_xlg_mac_speed_duplex_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ enum mv_port_duplex duplex);
+int mv_gop110_xlg_mac_speed_duplex_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed *speed,
+ enum mv_port_duplex *duplex);
+int mv_gop110_xlg_mac_fc_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc fc);
+void mv_gop110_xlg_mac_fc_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc *fc);
+int mv_gop110_xlg_mac_port_link_speed_fc(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ int force_link_up);
+void mv_gop110_xlg_port_link_event_mask(struct gop_hw *gop, int mac_num);
+void mv_gop110_xlg_port_external_event_unmask(struct gop_hw *gop,
+ int mac_num,
+ int bit_2_open);
+void mv_gop110_xlg_port_link_event_clear(struct gop_hw *gop, int mac_num);
+void mv_gop110_xlg_2_gig_mac_cfg(struct gop_hw *gop, int mac_num);
+
+/* GMAC Functions */
+static inline u32 mv_gop110_gmac_read(struct gop_hw *gop, int mac_num,
+ u32 offset)
+{
+ return(mv_gop_gen_read(gop->gop_110.gmac.base,
+ mac_num * gop->gop_110.gmac.obj_size + offset));
+}
+
+static inline void mv_gop110_gmac_write(struct gop_hw *gop, int mac_num,
+ u32 offset, u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.gmac.base,
+ mac_num * gop->gop_110.gmac.obj_size + offset, data);
+}
+
+static inline void mv_gop110_gmac_print(struct gop_hw *gop, char *reg_name,
+ int mac_num, u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_gmac_read(gop, mac_num, reg));
+}
+
+void mv_gop110_register_bases_dump(struct gop_hw *gop);
+void mv_gop110_gmac_regs_dump(struct gop_hw *gop, int port);
+int mv_gop110_gmac_reset(struct gop_hw *gop, int mac_num,
+ enum mv_reset reset);
+int mv_gop110_gmac_mode_cfg(struct gop_hw *gop, struct mv_mac_data *mac);
+int mv_gop110_gmac_loopback_cfg(struct gop_hw *gop, int mac_num,
+ enum mv_lb_type type);
+bool mv_gop110_gmac_link_status_get(struct gop_hw *gop, int mac_num);
+void mv_gop110_gmac_port_enable(struct gop_hw *gop, int mac_num);
+void mv_gop110_gmac_port_disable(struct gop_hw *gop, int mac_num);
+void mv_gop110_gmac_port_periodic_xon_set(struct gop_hw *gop, int mac_num,
+ int enable);
+int mv_gop110_gmac_link_status(struct gop_hw *gop, int mac_num,
+ struct mv_port_link_status *pstatus);
+int mv_gop110_gmac_max_rx_size_set(struct gop_hw *gop, int mac_num,
+ int max_rx_size);
+int mv_gop110_gmac_force_link_mode_set(struct gop_hw *gop, int mac_num,
+ bool force_link_up,
+ bool force_link_down);
+int mv_gop110_gmac_force_link_mode_get(struct gop_hw *gop, int mac_num,
+ bool *force_link_up,
+ bool *force_link_down);
+int mv_gop110_gmac_speed_duplex_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ enum mv_port_duplex duplex);
+int mv_gop110_gmac_speed_duplex_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed *speed,
+ enum mv_port_duplex *duplex);
+int mv_gop110_gmac_fc_set(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc fc);
+void mv_gop110_gmac_fc_get(struct gop_hw *gop, int mac_num,
+ enum mv_port_fc *fc);
+int mv_gop110_gmac_port_link_speed_fc(struct gop_hw *gop, int mac_num,
+ enum mv_port_speed speed,
+ int force_link_up);
+void mv_gop110_gmac_port_link_event_mask(struct gop_hw *gop, int mac_num);
+void mv_gop110_gmac_port_link_event_unmask(struct gop_hw *gop, int mac_num);
+void mv_gop110_gmac_port_link_event_clear(struct gop_hw *gop, int mac_num);
+int mv_gop110_gmac_port_autoneg_restart(struct gop_hw *gop, int mac_num);
+
+/* SMI Functions */
+static inline u32 mv_gop110_smi_read(struct gop_hw *gop, u32 offset)
+{
+ return mv_gop_gen_read(gop->gop_110.smi_base, offset);
+}
+
+static inline void mv_gop110_smi_write(struct gop_hw *gop, u32 offset,
+ u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.smi_base, offset, data);
+}
+
+static inline void mv_gop110_smi_print(struct gop_hw *gop, char *reg_name,
+ u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_smi_read(gop, reg));
+}
+
+/* RFU1 Functions */
+static inline u32 mv_gop110_rfu1_read(struct gop_hw *gop, u32 offset)
+{
+ return mv_gop_gen_read(gop->gop_110.rfu1_base, offset);
+}
+
+static inline void mv_gop110_rfu1_write(struct gop_hw *gop, u32 offset,
+ u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.rfu1_base, offset, data);
+}
+
+static inline void mv_gop110_rfu1_print(struct gop_hw *gop, char *reg_name,
+ u32 reg)
+{
+ pr_info(" %-32s: 0x%x = 0x%08x\n", reg_name, reg,
+ mv_gop110_rfu1_read(gop, reg));
+}
+
+int mv_gop110_smi_init(struct gop_hw *gop);
+int mv_gop110_smi_phy_addr_cfg(struct gop_hw *gop, int port, int addr);
+
+/* MIB Functions */
+u64 mv_gop110_mib_read64(struct gop_hw *gop, int port, unsigned int offset);
+void mv_gop110_mib_counters_show(struct gop_hw *gop, int port);
+void mv_gop110_mib_counters_stat_update(struct gop_hw *gop, int port,
+ struct gop_stat *gop_statistics);
+
+/* PTP Functions */
+void mv_gop110_ptp_enable(struct gop_hw *gop, int port, bool state);
+
+/* RFU Functions */
+int mv_gop110_netc_init(struct gop_hw *gop,
+ u32 net_comp_config, enum mv_netc_phase phase);
+void mv_gop110_netc_active_port(struct gop_hw *gop, u32 port, u32 val);
+void mv_gop110_netc_xon_set(struct gop_hw *gop, enum mv_gop_port port, bool en);
+
+/* FCA Functions */
+void mv_gop110_fca_send_periodic(struct gop_hw *gop, int mac_num, bool en);
+void mv_gop110_fca_set_periodic_timer(struct gop_hw *gop, int mac_num, u64 timer);
+
+void mv_gop110_fca_tx_enable(struct gop_hw *gop, int mac_num, bool en);
+
+bool mv_gop110_check_fca_tx_state(struct gop_hw *gop, int mac_num);
+
+static inline u32 mv_gop110_fca_read(struct gop_hw *gop, int mac_num,
+ u32 offset)
+{
+ return mv_gop_gen_read(gop->gop_110.fca.base,
+ mac_num * gop->gop_110.fca.obj_size + offset);
+}
+
+static inline void mv_gop110_fca_write(struct gop_hw *gop, int mac_num,
+ u32 offset, u32 data)
+{
+ mv_gop_gen_write(gop->gop_110.fca.base,
+ mac_num * gop->gop_110.fca.obj_size + offset, data);
+}
+
+/* Ethtool Functions */
+void mv_gop110_gmac_registers_dump(struct mv_pp2x_port *port, u32 *regs_buff);
+void mv_gop110_xlg_registers_dump(struct mv_pp2x_port *port, u32 *regs_buff);
+
+#endif /* _MV_GOP_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw_type.h b/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw_type.h
new file mode 100644
index 000000000000..f30f7d243870
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_gop110_hw_type.h
@@ -0,0 +1,1983 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#ifndef _MV_GOP_HW_TYPE_H_
+#define _MV_GOP_HW_TYPE_H_
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#define MVCPN110_GOP_MAC_NUM 4
+
+/***********/
+/* GMAC REGS */
+/***********/
+
+/* Port Mac Control0 */
+#define MV_GMAC_PORT_CTRL0_REG (0x0000)
+#define MV_GMAC_PORT_CTRL0_PORTEN_OFFS 0
+#define MV_GMAC_PORT_CTRL0_PORTEN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL0_PORTEN_OFFS)
+
+#define MV_GMAC_PORT_CTRL0_PORTTYPE_OFFS 1
+#define MV_GMAC_PORT_CTRL0_PORTTYPE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL0_PORTTYPE_OFFS)
+
+#define MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_OFFS 2
+#define MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_MASK \
+ (0x00001fff << MV_GMAC_PORT_CTRL0_FRAMESIZELIMIT_OFFS)
+
+#define MV_GMAC_PORT_CTRL0_COUNT_EN_OFFS 15
+#define MV_GMAC_PORT_CTRL0_COUNT_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL0_COUNT_EN_OFFS)
+
+/* Port Mac Control1 */
+#define MV_GMAC_PORT_CTRL1_REG (0x0004)
+#define MV_GMAC_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS 0
+#define MV_GMAC_PORT_CTRL1_EN_RX_CRC_CHECK_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS 1
+#define MV_GMAC_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_MGMII_MODE_OFFS 2
+#define MV_GMAC_PORT_CTRL1_MGMII_MODE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_MGMII_MODE_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS 3
+#define MV_GMAC_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS 4
+#define MV_GMAC_PORT_CTRL1_DIS_EXCESSIVE_COL_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_GMII_LOOPBACK_OFFS 5
+#define MV_GMAC_PORT_CTRL1_GMII_LOOPBACK_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_GMII_LOOPBACK_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_PCS_LOOPBACK_OFFS 6
+#define MV_GMAC_PORT_CTRL1_PCS_LOOPBACK_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_PCS_LOOPBACK_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_FC_SA_ADDR_LO_OFFS 7
+#define MV_GMAC_PORT_CTRL1_FC_SA_ADDR_LO_MASK \
+ (0x000000ff << MV_GMAC_PORT_CTRL1_FC_SA_ADDR_LO_OFFS)
+
+#define MV_GMAC_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS 15
+#define MV_GMAC_PORT_CTRL1_EN_SHORT_PREAMBLE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS)
+
+/* Port Mac Control2 */
+#define MV_GMAC_PORT_CTRL2_REG (0x0008)
+#define MV_GMAC_PORT_CTRL2_SGMII_MODE_OFFS 0
+#define MV_GMAC_PORT_CTRL2_SGMII_MODE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_SGMII_MODE_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_FC_MODE_OFFS 1
+#define MV_GMAC_PORT_CTRL2_FC_MODE_MASK \
+ (0x00000003 << MV_GMAC_PORT_CTRL2_FC_MODE_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_PCS_EN_OFFS 3
+#define MV_GMAC_PORT_CTRL2_PCS_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_PCS_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_RGMII_MODE_OFFS 4
+#define MV_GMAC_PORT_CTRL2_RGMII_MODE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_RGMII_MODE_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_DIS_PADING_OFFS 5
+#define MV_GMAC_PORT_CTRL2_DIS_PADING_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_DIS_PADING_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_PORTMACRESET_OFFS 6
+#define MV_GMAC_PORT_CTRL2_PORTMACRESET_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_PORTMACRESET_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_TX_DRAIN_OFFS 7
+#define MV_GMAC_PORT_CTRL2_TX_DRAIN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_TX_DRAIN_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_EN_MII_ODD_PRE_OFFS 8
+#define MV_GMAC_PORT_CTRL2_EN_MII_ODD_PRE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_EN_MII_ODD_PRE_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_CLK_125_BYPS_EN_OFFS 9
+#define MV_GMAC_PORT_CTRL2_CLK_125_BYPS_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_CLK_125_BYPS_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_PRBS_CHECK_EN_OFFS 10
+#define MV_GMAC_PORT_CTRL2_PRBS_CHECK_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_PRBS_CHECK_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_PRBS_GEN_EN_OFFS 11
+#define MV_GMAC_PORT_CTRL2_PRBS_GEN_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_PRBS_GEN_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS 12
+#define MV_GMAC_PORT_CTRL2_SELECT_DATA_TO_TX_MASK \
+ (0x00000003 << MV_GMAC_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_EN_COL_ON_BP_OFFS 14
+#define MV_GMAC_PORT_CTRL2_EN_COL_ON_BP_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_EN_COL_ON_BP_OFFS)
+
+#define MV_GMAC_PORT_CTRL2_EARLY_REJECT_MODE_OFFS 15
+#define MV_GMAC_PORT_CTRL2_EARLY_REJECT_MODE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL2_EARLY_REJECT_MODE_OFFS)
+
+/* Port Auto-negotiation Configuration */
+#define MV_GMAC_PORT_AUTO_NEG_CFG_REG (0x000c)
+#define MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS 0
+#define MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS 1
+#define MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS 2
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS 3
+#define MV_GMAC_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS 4
+#define MV_GMAC_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS 5
+#define MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS 6
+#define MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS 7
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS 9
+#define MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS 10
+#define MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS 11
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS 12
+#define MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS 13
+#define MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS 14
+#define MV_GMAC_PORT_AUTO_NEG_CFG_PHY_MODE_MASK \
+ (0x00000001 << MV_GMAC_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS)
+
+#define MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS 15
+#define MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS)
+
+/* Port Status0 */
+#define MV_GMAC_PORT_STATUS0_REG (0x0010)
+#define MV_GMAC_PORT_STATUS0_LINKUP_OFFS 0
+#define MV_GMAC_PORT_STATUS0_LINKUP_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_LINKUP_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_GMIISPEED_OFFS 1
+#define MV_GMAC_PORT_STATUS0_GMIISPEED_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_GMIISPEED_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_MIISPEED_OFFS 2
+#define MV_GMAC_PORT_STATUS0_MIISPEED_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_MIISPEED_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_FULLDX_OFFS 3
+#define MV_GMAC_PORT_STATUS0_FULLDX_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_FULLDX_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_RXFCEN_OFFS 4
+#define MV_GMAC_PORT_STATUS0_RXFCEN_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_RXFCEN_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_TXFCEN_OFFS 5
+#define MV_GMAC_PORT_STATUS0_TXFCEN_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_TXFCEN_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_PORTRXPAUSE_OFFS 6
+#define MV_GMAC_PORT_STATUS0_PORTRXPAUSE_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_PORTRXPAUSE_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_PORTTXPAUSE_OFFS 7
+#define MV_GMAC_PORT_STATUS0_PORTTXPAUSE_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_PORTTXPAUSE_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS 8
+#define MV_GMAC_PORT_STATUS0_PORTIS_DOINGPRESSURE_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_PORTBUFFULL_OFFS 9
+#define MV_GMAC_PORT_STATUS0_PORTBUFFULL_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_PORTBUFFULL_OFFS)
+
+/* Each bit field below is described by an _OFFS bit position plus the
+ * matching _MASK (field-width mask shifted left by _OFFS).
+ */
+#define MV_GMAC_PORT_STATUS0_SYNCFAIL10MS_OFFS 10
+#define MV_GMAC_PORT_STATUS0_SYNCFAIL10MS_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_SYNCFAIL10MS_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_ANDONE_OFFS 11
+#define MV_GMAC_PORT_STATUS0_ANDONE_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_ANDONE_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS 12
+#define MV_GMAC_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_SERDESPLL_LOCKED_OFFS 13
+#define MV_GMAC_PORT_STATUS0_SERDESPLL_LOCKED_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_SERDESPLL_LOCKED_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_SYNCOK_OFFS 14
+#define MV_GMAC_PORT_STATUS0_SYNCOK_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_SYNCOK_OFFS)
+
+#define MV_GMAC_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS 15
+#define MV_GMAC_PORT_STATUS0_SQUELCHNOT_DETECTED_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS)
+
+/* Port Serial Parameters Configuration */
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_REG (0x0014)
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS 0
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS 1
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS 2
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS 3
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS 4
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_BP_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS 5
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS 6
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_MASK \
+ (0x0000003f << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS 12
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS 13
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS 14
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS 15
+#define MV_GMAC_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS)
+
+/* Port Fifo Configuration 0 */
+#define MV_GMAC_PORT_FIFO_CFG_0_REG (0x0018)
+#define MV_GMAC_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS 0
+#define MV_GMAC_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_MASK \
+ (0x000000ff << \
+ MV_GMAC_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS)
+
+#define MV_GMAC_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS 8
+#define MV_GMAC_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_MASK \
+ (0x000000ff << \
+ MV_GMAC_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS)
+
+/* Port Fifo Configuration 1 */
+#define MV_GMAC_PORT_FIFO_CFG_1_REG (0x001c)
+#define MV_GMAC_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS 0
+#define MV_GMAC_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_MASK \
+ (0x0000003f << MV_GMAC_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS)
+
+#define MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS 6
+#define MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK \
+ (0x000000ff << MV_GMAC_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)
+
+#define MV_GMAC_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS 15
+#define MV_GMAC_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS)
+
+/* Port Serdes Configuration0 */
+#define MV_GMAC_PORT_SERDES_CFG0_REG (0x0028)
+#define MV_GMAC_PORT_SERDES_CFG0_SERDESRESET_OFFS 0
+#define MV_GMAC_PORT_SERDES_CFG0_SERDESRESET_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_SERDESRESET_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_PU_TX_OFFS 1
+#define MV_GMAC_PORT_SERDES_CFG0_PU_TX_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_PU_TX_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_PU_RX_OFFS 2
+#define MV_GMAC_PORT_SERDES_CFG0_PU_RX_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_PU_RX_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_PU_PLL_OFFS 3
+#define MV_GMAC_PORT_SERDES_CFG0_PU_PLL_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_PU_PLL_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_PU_IVREF_OFFS 4
+#define MV_GMAC_PORT_SERDES_CFG0_PU_IVREF_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_PU_IVREF_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_TESTEN_OFFS 5
+#define MV_GMAC_PORT_SERDES_CFG0_TESTEN_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_TESTEN_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_DPHER_EN_OFFS 6
+#define MV_GMAC_PORT_SERDES_CFG0_DPHER_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_DPHER_EN_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS 7
+#define MV_GMAC_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS 8
+#define MV_GMAC_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS 9
+#define MV_GMAC_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS 10
+#define MV_GMAC_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS 11
+#define MV_GMAC_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_TERM75_TX_OFFS 12
+#define MV_GMAC_PORT_SERDES_CFG0_TERM75_TX_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_TERM75_TX_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_OUTAMP_OFFS 13
+#define MV_GMAC_PORT_SERDES_CFG0_OUTAMP_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_OUTAMP_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS 14
+#define MV_GMAC_PORT_SERDES_CFG0_BTS712_FIX_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS)
+
+#define MV_GMAC_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS 15
+#define MV_GMAC_PORT_SERDES_CFG0_BTS156_FIX_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS)
+
+/* Port Serdes Configuration1 */
+#define MV_GMAC_PORT_SERDES_CFG1_REG (0x002c)
+#define MV_GMAC_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS 0
+/* The mask below previously referenced a "MV_GMAC_GMAC_"-prefixed _OFFS
+ * name that is never defined, so any use of this mask failed to build;
+ * reference the _OFFS actually defined above instead.
+ */
+#define MV_GMAC_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS)
+
+/* NOTE(review): the remaining CFG1 field names carry a doubled
+ * "MV_GMAC_GMAC_" prefix; they are kept unchanged because other files
+ * may already reference them by these names.
+ */
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS 1
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_MASK \
+ (0x00000001 << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_MEN_OFFS 2
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_MEN_MASK \
+ (0x00000003 << MV_GMAC_GMAC_PORT_SERDES_CFG1_MEN_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_VCMS_OFFS 4
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_VCMS_MASK \
+ (0x00000001 << MV_GMAC_GMAC_PORT_SERDES_CFG1_VCMS_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS 5
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_MASK \
+ (0x00000001 << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS 6
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_MASK \
+ (0x00000001 << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS 7
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_MASK \
+ (0x00000001 << MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS 8
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_MASK \
+ (0x0000001f << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS 13
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_MASK \
+ (0x00000001 << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS 14
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_MASK \
+ (0x00000001 << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS)
+
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS 15
+#define MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_MASK \
+ (0x00000001 << \
+ MV_GMAC_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS)
+
+
+/* Port Serdes Configuration2 */
+#define MV_GMAC_PORT_SERDES_CFG2_REG (0x0030)
+#define MV_GMAC_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS 0
+#define MV_GMAC_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_MASK \
+ (0x0000ffff << \
+ MV_GMAC_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS)
+
+/* Port Serdes Configuration3 */
+#define MV_GMAC_PORT_SERDES_CFG3_REG (0x0034)
+#define MV_GMAC_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS 0
+#define MV_GMAC_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_MASK \
+ (0x0000ffff << \
+ MV_GMAC_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS)
+
+/* Port Prbs Status */
+#define MV_GMAC_PORT_PRBS_STATUS_REG (0x0038)
+#define MV_GMAC_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS 0
+#define MV_GMAC_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_MASK \
+ (0x00000001 << MV_GMAC_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS)
+
+#define MV_GMAC_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS 1
+#define MV_GMAC_PORT_PRBS_STATUS_PRBSCHECKRDY_MASK \
+ (0x00000001 << MV_GMAC_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS)
+
+/* Port Prbs Error Counter */
+#define MV_GMAC_PORT_PRBS_ERR_CNTR_REG (0x003c)
+#define MV_GMAC_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS 0
+#define MV_GMAC_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_MASK \
+ (0x0000ffff << MV_GMAC_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS)
+
+/* Port Status1 */
+#define MV_GMAC_PORT_STATUS1_REG (0x0040)
+#define MV_GMAC_PORT_STATUS1_MEDIAACTIVE_OFFS 0
+#define MV_GMAC_PORT_STATUS1_MEDIAACTIVE_MASK \
+ (0x00000001 << MV_GMAC_PORT_STATUS1_MEDIAACTIVE_OFFS)
+
+/* Port Mib Counters Control */
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_REG (0x0044)
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS 0
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS 1
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS 2
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS 3
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS 5
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 6
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 7
+#define MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Port Mac Control3 */
+#define MV_GMAC_PORT_CTRL3_REG (0x0048)
+#define MV_GMAC_PORT_CTRL3_BUF_SIZE_OFFS 0
+#define MV_GMAC_PORT_CTRL3_BUF_SIZE_MASK \
+ (0x0000003f << MV_GMAC_PORT_CTRL3_BUF_SIZE_OFFS)
+
+#define MV_GMAC_PORT_CTRL3_IPG_DATA_OFFS 6
+#define MV_GMAC_PORT_CTRL3_IPG_DATA_MASK \
+ (0x000001ff << MV_GMAC_PORT_CTRL3_IPG_DATA_OFFS)
+
+#define MV_GMAC_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS 15
+#define MV_GMAC_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS)
+
+/* QSGMII */
+#define MV_GMAC_QSGMII_REG (0x004c)
+#define MV_GMAC_QSGMII_QSGMII_REG_OFFS 0
+#define MV_GMAC_QSGMII_QSGMII_REG_MASK \
+ (0x0000ffff << MV_GMAC_QSGMII_QSGMII_REG_OFFS)
+
+/* Qsgmii Status */
+#define MV_GMAC_QSGMII_STATUS_REG (0x0050)
+#define MV_GMAC_QSGMII_STATUS_QSGMII_STATUS_OFFS 0
+#define MV_GMAC_QSGMII_STATUS_QSGMII_STATUS_MASK \
+ (0x000000ff << MV_GMAC_QSGMII_STATUS_QSGMII_STATUS_OFFS)
+
+/* Qsgmii Prbs Counter */
+#define MV_GMAC_QSGMII_PRBS_CNTR_REG (0x0054)
+#define MV_GMAC_QSGMII_PRBS_CNTR_QSGMII_PRBS_ERR_CNT_REG_OFFS 0
+#define MV_GMAC_QSGMII_PRBS_CNTR_QSGMII_PRBS_ERR_CNT_REG_MASK \
+ (0x0000ffff << \
+ MV_GMAC_QSGMII_PRBS_CNTR_QSGMII_PRBS_ERR_CNT_REG_OFFS)
+
+/* Ccfc Port Speed Timer (one register per timer index t) */
+/* Parenthesize the macro argument so expression arguments such as
+ * (t + 1) compute the intended offset.
+ */
+#define MV_GMAC_CCFC_PORT_SPEED_TIMER_REG(t) (0x0058 + (t) * 4)
+#define MV_GMAC_CCFC_PORT_SPEED_TIMER_PORTSPEEDTIMER_OFFS 0
+#define MV_GMAC_CCFC_PORT_SPEED_TIMER_PORTSPEEDTIMER_MASK \
+ (0x0000ffff << \
+ MV_GMAC_CCFC_PORT_SPEED_TIMER_PORTSPEEDTIMER_OFFS)
+
+/* Fc Dsa Tag (one register per tag index n) */
+/* Parenthesize the macro argument so expression arguments such as
+ * (n + 1) compute the intended offset.
+ */
+#define MV_GMAC_FC_DSA_TAG_REG(n) (0x0078 + 4 * (n))
+#define MV_GMAC_FC_DSA_TAG_DSATAGREGN_OFFS 0
+#define MV_GMAC_FC_DSA_TAG_DSATAGREGN_MASK \
+ (0x0000ffff << MV_GMAC_FC_DSA_TAG_DSATAGREGN_OFFS)
+
+/* Link Level Flow Control Window Reg 0 */
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0 (0x0088)
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0_LLFC_FC_WINDOW_REG0_OFFS 0
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0_LLFC_FC_WINDOW_REG0_MASK \
+ (0x0000ffff << \
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0_LLFC_FC_WINDOW_REG0_OFFS)
+
+/* Link Level Flow Control Window Reg 1 */
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1 (0x008c)
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1_LLFC_FC_WINDOW_REG1_OFFS 0
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1_LLFC_FC_WINDOW_REG1_MASK \
+ (0x00007fff << \
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1_LLFC_FC_WINDOW_REG1_OFFS)
+
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1_LLFC_RATE_LIMIT_EN_OFFS 15
+#define MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1_LLFC_RATE_LIMIT_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1_LLFC_RATE_LIMIT_EN_OFFS)
+
+/* Port Mac Control4 */
+#define MV_GMAC_PORT_CTRL4_REG (0x0090)
+#define MV_GMAC_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS 0
+#define MV_GMAC_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_PREAMBLE_FIX_OFFS 1
+#define MV_GMAC_PORT_CTRL4_PREAMBLE_FIX_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_PREAMBLE_FIX_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS 2
+#define MV_GMAC_PORT_CTRL4_SQ_DETECT_FIX_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_FC_EN_RX_OFFS 3
+#define MV_GMAC_PORT_CTRL4_FC_EN_RX_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_FC_EN_RX_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_FC_EN_TX_OFFS 4
+#define MV_GMAC_PORT_CTRL4_FC_EN_TX_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_FC_EN_TX_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_DP_CLK_SEL_OFFS 5
+#define MV_GMAC_PORT_CTRL4_DP_CLK_SEL_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_DP_CLK_SEL_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_SYNC_BYPASS_OFFS 6
+#define MV_GMAC_PORT_CTRL4_SYNC_BYPASS_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_SYNC_BYPASS_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS 7
+#define MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS 8
+#define MV_GMAC_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_MARVELL_HEADER_EN_OFFS 9
+#define MV_GMAC_PORT_CTRL4_MARVELL_HEADER_EN_MASK \
+ (0x00000001 << MV_GMAC_PORT_CTRL4_MARVELL_HEADER_EN_OFFS)
+
+#define MV_GMAC_PORT_CTRL4_LEDS_NUMBER_OFFS 10
+#define MV_GMAC_PORT_CTRL4_LEDS_NUMBER_MASK \
+ (0x0000003f << MV_GMAC_PORT_CTRL4_LEDS_NUMBER_OFFS)
+
+/* Port Serial Parameters 1 Configuration */
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_REG (0x0094)
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_RX_STANDARD_PRBS7_OFFS 0
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_RX_STANDARD_PRBS7_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_1_CFG_RX_STANDARD_PRBS7_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_FORWARD_PFC_EN_OFFS 1
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_FORWARD_PFC_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_1_CFG_FORWARD_PFC_EN_OFFS)
+
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_FORWARD_UNKNOWN_FC_EN_OFFS 2
+#define MV_GMAC_PORT_SERIAL_PARAM_1_CFG_FORWARD_UNKNOWN_FC_EN_MASK \
+ (0x00000001 << \
+ MV_GMAC_PORT_SERIAL_PARAM_1_CFG_FORWARD_UNKNOWN_FC_EN_OFFS)
+
+/* Lpi Control 0 */
+#define MV_GMAC_LPI_CTRL_0_REG (0x00c0)
+#define MV_GMAC_LPI_CTRL_0_LI_LIMIT_OFFS 0
+#define MV_GMAC_LPI_CTRL_0_LI_LIMIT_MASK \
+ (0x000000ff << MV_GMAC_LPI_CTRL_0_LI_LIMIT_OFFS)
+
+#define MV_GMAC_LPI_CTRL_0_TS_LIMIT_OFFS 8
+#define MV_GMAC_LPI_CTRL_0_TS_LIMIT_MASK \
+ (0x000000ff << MV_GMAC_LPI_CTRL_0_TS_LIMIT_OFFS)
+
+/* Lpi Control 1 */
+#define MV_GMAC_LPI_CTRL_1_REG (0x00c4)
+#define MV_GMAC_LPI_CTRL_1_LPI_REQUEST_EN_OFFS 0
+#define MV_GMAC_LPI_CTRL_1_LPI_REQUEST_EN_MASK \
+ (0x00000001 << MV_GMAC_LPI_CTRL_1_LPI_REQUEST_EN_OFFS)
+
+#define MV_GMAC_LPI_CTRL_1_LPI_REQUEST_FORCE_OFFS 1
+#define MV_GMAC_LPI_CTRL_1_LPI_REQUEST_FORCE_MASK \
+ (0x00000001 << MV_GMAC_LPI_CTRL_1_LPI_REQUEST_FORCE_OFFS)
+
+#define MV_GMAC_LPI_CTRL_1_LPI_MANUAL_MODE_OFFS 2
+#define MV_GMAC_LPI_CTRL_1_LPI_MANUAL_MODE_MASK \
+ (0x00000001 << MV_GMAC_LPI_CTRL_1_LPI_MANUAL_MODE_OFFS)
+
+#define MV_GMAC_LPI_CTRL_1_EN_GTX_CLK_HALT_OFFS 3
+#define MV_GMAC_LPI_CTRL_1_EN_GTX_CLK_HALT_MASK \
+ (0x00000001 << MV_GMAC_LPI_CTRL_1_EN_GTX_CLK_HALT_OFFS)
+
+#define MV_GMAC_LPI_CTRL_1_TW_LIMIT_OFFS 4
+#define MV_GMAC_LPI_CTRL_1_TW_LIMIT_MASK \
+ (0x00000fff << MV_GMAC_LPI_CTRL_1_TW_LIMIT_OFFS)
+
+/* Lpi Control 2 */
+#define MV_GMAC_LPI_CTRL_2_REG (0x00c8)
+#define MV_GMAC_LPI_CTRL_2_LPI_CLK_DIV_OFFS 0
+#define MV_GMAC_LPI_CTRL_2_LPI_CLK_DIV_MASK \
+ (0x0000007f << MV_GMAC_LPI_CTRL_2_LPI_CLK_DIV_OFFS)
+
+#define MV_GMAC_LPI_CTRL_2_PCS_RX_ER_MASK_DISABLE_OFFS 7
+#define MV_GMAC_LPI_CTRL_2_PCS_RX_ER_MASK_DISABLE_MASK \
+ (0x00000001 << MV_GMAC_LPI_CTRL_2_PCS_RX_ER_MASK_DISABLE_OFFS)
+
+#define MV_GMAC_LPI_CTRL_2_EN_GMII2MII_LPI_FIX_OFFS 8
+#define MV_GMAC_LPI_CTRL_2_EN_GMII2MII_LPI_FIX_MASK \
+ (0x00000001 << MV_GMAC_LPI_CTRL_2_EN_GMII2MII_LPI_FIX_OFFS)
+
+/* Lpi Status */
+#define MV_GMAC_LPI_STATUS_REG (0x00cc)
+#define MV_GMAC_LPI_STATUS_PCS_RX_LPI_STATUS_OFFS 0
+#define MV_GMAC_LPI_STATUS_PCS_RX_LPI_STATUS_MASK \
+ (0x00000001 << MV_GMAC_LPI_STATUS_PCS_RX_LPI_STATUS_OFFS)
+
+#define MV_GMAC_LPI_STATUS_PCS_TX_LPI_STATUS_OFFS 1
+#define MV_GMAC_LPI_STATUS_PCS_TX_LPI_STATUS_MASK \
+ (0x00000001 << MV_GMAC_LPI_STATUS_PCS_TX_LPI_STATUS_OFFS)
+
+#define MV_GMAC_LPI_STATUS_MAC_RX_LP_IDLE_STATUS_OFFS 2
+#define MV_GMAC_LPI_STATUS_MAC_RX_LP_IDLE_STATUS_MASK \
+ (0x00000001 << MV_GMAC_LPI_STATUS_MAC_RX_LP_IDLE_STATUS_OFFS)
+
+#define MV_GMAC_LPI_STATUS_MAC_TX_LP_WAIT_STATUS_OFFS 3
+#define MV_GMAC_LPI_STATUS_MAC_TX_LP_WAIT_STATUS_MASK \
+ (0x00000001 << MV_GMAC_LPI_STATUS_MAC_TX_LP_WAIT_STATUS_OFFS)
+
+#define MV_GMAC_LPI_STATUS_MAC_TX_LP_IDLE_STATUS_OFFS 4
+#define MV_GMAC_LPI_STATUS_MAC_TX_LP_IDLE_STATUS_MASK \
+ (0x00000001 << MV_GMAC_LPI_STATUS_MAC_TX_LP_IDLE_STATUS_OFFS)
+
+/* Lpi Counter */
+#define MV_GMAC_LPI_CNTR_REG (0x00d0)
+#define MV_GMAC_LPI_CNTR_LPI_COUNTER_OFFS 0
+#define MV_GMAC_LPI_CNTR_LPI_COUNTER_MASK \
+ (0x0000ffff << MV_GMAC_LPI_CNTR_LPI_COUNTER_OFFS)
+
+/* Pulse 1 Ms Low */
+#define MV_GMAC_PULSE_1_MS_LOW_REG (0x00d4)
+#define MV_GMAC_PULSE_1_MS_LOW_PULSE_1MS_MAX_LOW_OFFS 0
+#define MV_GMAC_PULSE_1_MS_LOW_PULSE_1MS_MAX_LOW_MASK \
+ (0x0000ffff << MV_GMAC_PULSE_1_MS_LOW_PULSE_1MS_MAX_LOW_OFFS)
+
+/* Pulse 1 Ms High */
+#define MV_GMAC_PULSE_1_MS_HIGH_REG (0x00d8)
+#define MV_GMAC_PULSE_1_MS_HIGH_PULSE_1MS_MAX_HIGH_OFFS 0
+#define MV_GMAC_PULSE_1_MS_HIGH_PULSE_1MS_MAX_HIGH_MASK \
+ (0x0000ffff << MV_GMAC_PULSE_1_MS_HIGH_PULSE_1MS_MAX_HIGH_OFFS)
+
+/* Port Interrupt Cause (0x0020/0x0024 - listed out of address order) */
+#define MV_GMAC_INTERRUPT_CAUSE_REG (0x0020)
+/* Port Interrupt Mask */
+#define MV_GMAC_INTERRUPT_MASK_REG (0x0024)
+#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS 1
+#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK (0x1 << \
+ MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary Cause */
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_REG (0x00A0)
+/* Port Interrupt Summary Mask */
+#define MV_GMAC_INTERRUPT_SUM_MASK_REG (0x00A4)
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS 1
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK (0x1 << \
+ MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS)
+
+/***************/
+/* XLGMAC REGS */
+/***************/
+
+/* Port Mac Control0 */
+#define MV_XLG_PORT_MAC_CTRL0_REG (0x0000)
+#define MV_XLG_MAC_CTRL0_PORTEN_OFFS 0
+#define MV_XLG_MAC_CTRL0_PORTEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PORTEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_MACRESETN_OFFS 1
+#define MV_XLG_MAC_CTRL0_MACRESETN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_MACRESETN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS 2
+#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS 3
+#define MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS 5
+#define MV_XLG_MAC_CTRL0_TXIPGMODE_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXFCEN_OFFS 7
+#define MV_XLG_MAC_CTRL0_RXFCEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXFCEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_TXFCEN_OFFS 8
+#define MV_XLG_MAC_CTRL0_TXFCEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_TXFCEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS 9
+#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS 10
+#define MV_XLG_MAC_CTRL0_PERIODICXONEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS 11
+#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS 13
+#define MV_XLG_MAC_CTRL0_PADDINGDIS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS 14
+#define MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS 15
+#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS)
+
+/* Port Mac Control1 */
+#define MV_XLG_PORT_MAC_CTRL1_REG (0x0004)
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS 0
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK \
+ (0x00001fff << MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS)
+
+#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS 13
+#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS 14
+#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS 15
+#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS)
+
+/* Port Mac Control2 */
+#define MV_XLG_PORT_MAC_CTRL2_REG (0x0008)
+#define MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS 0
+#define MV_XLG_MAC_CTRL2_SALOW_7_0_MASK \
+ (0x000000ff << MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS)
+
+#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS 8
+#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS 9
+#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS)
+
+#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS 10
+#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS 13
+#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_FC_MODE_OFFS 14
+#define MV_XLG_MAC_CTRL2_FC_MODE_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL2_FC_MODE_OFFS)
+
+/* Port Status */
+#define MV_XLG_MAC_PORT_STATUS_REG (0x000c)
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS 0
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS 1
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS 2
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS 3
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS 4
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS 5
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS 6
+#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS 7
+#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS 8
+#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS)
+
+/* Port Fifos Thresholds Configuration */
+#define MV_XLG_PORT_FIFOS_THRS_CFG_REG (0x0010)
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS 0
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_MASK \
+ (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS)
+
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS 5
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_MASK \
+ (0x0000003f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS)
+
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS 11
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_MASK \
+ (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS)
+
+/* Port Mac Control3 */
+#define MV_XLG_PORT_MAC_CTRL3_REG (0x001c)
+#define MV_XLG_MAC_CTRL3_BUFSIZE_OFFS 0
+#define MV_XLG_MAC_CTRL3_BUFSIZE_MASK \
+ (0x0000003f << MV_XLG_MAC_CTRL3_BUFSIZE_OFFS)
+
+#define MV_XLG_MAC_CTRL3_XTRAIPG_OFFS 6
+#define MV_XLG_MAC_CTRL3_XTRAIPG_MASK \
+ (0x0000007f << MV_XLG_MAC_CTRL3_XTRAIPG_OFFS)
+
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS 13
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+
+/* Port Per Prio Flow Control Status */
+#define MV_XLG_PORT_PER_PRIO_FLOW_CTRL_STATUS_REG (0x0020)
+#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS 0
+#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS)
+
+/* Debug Bus Status */
+#define MV_XLG_DEBUG_BUS_STATUS_REG (0x0024)
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS 0
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_MASK \
+ (0x0000ffff << MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS)
+
+/* Port Metal Fix */
+#define MV_XLG_PORT_METAL_FIX_REG (0x002c)
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS 0
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS 1
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS 2
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS 3
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS 4
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS 5
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS 6
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS 7
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS 8
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS 9
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_MASK \
+ (0x0000000f << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS 13
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_MASK \
+ (0x00000007 << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS)
+
+/* Xg Mib Counters Control */
+#define MV_XLG_MIB_CNTRS_CTRL_REG (0x0030)
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS 0
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS 1
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS 2
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS 3
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS 5
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_MASK \
+ (0x0000003f << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 11
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 12
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Cn/ccfc Timer (one register per timer index t) */
+/* Parenthesize the macro argument so expression arguments such as
+ * (t + 1) compute the intended offset; drop the redundant outer parens.
+ */
+#define MV_XLG_CNCCFC_TIMERI_REG(t) (0x0038 + (t) * 4)
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS 0
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_MASK \
+ (0x0000ffff << MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS)
+
+/* Each bit field below is described by an _OFFS bit position plus the
+ * matching _MASK (field-width mask shifted left by _OFFS).
+ */
+/* Ppfc Control */
+#define MV_XLG_MAC_PPFC_CTRL_REG (0x0060)
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS 0
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS)
+
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS 9
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS)
+
+/* Fc Dsa Tag 0 */
+#define MV_XLG_MAC_FC_DSA_TAG_0_REG (0x0068)
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS)
+
+/* Fc Dsa Tag 1 */
+#define MV_XLG_MAC_FC_DSA_TAG_1_REG (0x006c)
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS)
+
+/* Fc Dsa Tag 2 */
+#define MV_XLG_MAC_FC_DSA_TAG_2_REG (0x0070)
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS)
+
+/* Fc Dsa Tag 3 */
+#define MV_XLG_MAC_FC_DSA_TAG_3_REG (0x0074)
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS)
+
+/* Dic Budget Compensation */
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_REG (0x0080)
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS 0
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_MASK \
+ (0x0000ffff << \
+ MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS)
+
+/* Port Mac Control4 */
+#define MV_XLG_PORT_MAC_CTRL4_REG (0x0084)
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS 0
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS)
+
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS 1
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS 2
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS 3
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS)
+
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS 4
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS 5
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS 6
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS 7
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_USE_XPCS_OFFS 8
+#define MV_XLG_MAC_CTRL4_USE_XPCS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_USE_XPCS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS 9
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS 10
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS 12
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS)
+
+#define MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_OFFS 14
+#define MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_OFFS)
+
+/* Port Mac Control5 */
+#define MV_XLG_PORT_MAC_CTRL5_REG (0x0088)
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS 0
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_MASK \
+ (0x0000000f << MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS 4
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS 7
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS 10
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS)
+
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS 13
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS)
+
+/* External Control */
+#define MV_XLG_MAC_EXT_CTRL_REG (0x0090)
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS 0
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS 1
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS 2
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS 3
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS 4
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS 5
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS 6
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS 7
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS 8
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS 9
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS 10
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS 11
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS 12
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS 13
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS 14
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS 15
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS)
+
+/* Macro Control */
+#define MV_XLG_MAC_MACRO_CTRL_REG (0x0094)
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS 0
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS 1
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS 2
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS 3
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS 4
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS 5
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS 6
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS 7
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS 8
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS 9
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS 10
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS 11
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS 12
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS 13
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS 14
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS 15
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS)
+
+#define MV_XLG_MAC_DIC_PPM_IPG_REDUCE_REG (0x0094) /* NOTE(review): same offset as MV_XLG_MAC_MACRO_CTRL_REG above — confirm against the register spec whether this aliasing is intended */
+
+/* Port Interrupt Cause */
+#define MV_XLG_INTERRUPT_CAUSE_REG (0x0014)
+/* Port Interrupt Mask */
+#define MV_XLG_INTERRUPT_MASK_REG (0x0018)
+#define MV_XLG_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_INTERRUPT_LINK_CHANGE_MASK (0x1 << \
+ MV_XLG_INTERRUPT_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary Cause */
+#define MV_XLG_EXTERNAL_INTERRUPT_CAUSE_REG (0x0058)
+/* Port Interrupt Summary Mask */
+#define MV_XLG_EXTERNAL_INTERRUPT_MASK_REG (0x005C)
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_MASK (0x1 << \
+ MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_OFFS)
+
+/***********/
+/*XPCS REGS */
+/***********/
+
+/* Global Configuration 0 */
+#define MV_XPCS_GLOBAL_CFG_0_REG (0x0)
+#define MV_XPCS_GLOBAL_CFG_0_PCSRESET_OFFS 0
+#define MV_XPCS_GLOBAL_CFG_0_PCSRESET_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_0_PCSRESET_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_DESKEWRESET_OFFS 1
+#define MV_XPCS_GLOBAL_CFG_0_DESKEWRESET_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_0_DESKEWRESET_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_TXRESET_OFFS 2
+#define MV_XPCS_GLOBAL_CFG_0_TXRESET_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_0_TXRESET_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_PCSMODE_OFFS 3
+#define MV_XPCS_GLOBAL_CFG_0_PCSMODE_MASK \
+ (0x00000003 << MV_XPCS_GLOBAL_CFG_0_PCSMODE_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_LANEACTIVE_OFFS 5
+#define MV_XPCS_GLOBAL_CFG_0_LANEACTIVE_MASK \
+ (0x00000003 << MV_XPCS_GLOBAL_CFG_0_LANEACTIVE_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_INDIVIDUALMODE_OFFS 7
+#define MV_XPCS_GLOBAL_CFG_0_INDIVIDUALMODE_MASK \
+ (0x0000003f << MV_XPCS_GLOBAL_CFG_0_INDIVIDUALMODE_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_TXSMMODE_OFFS 13
+#define MV_XPCS_GLOBAL_CFG_0_TXSMMODE_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_0_TXSMMODE_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_TXSMIDLECNTDISABLE_OFFS 14
+#define MV_XPCS_GLOBAL_CFG_0_TXSMIDLECNTDISABLE_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_0_TXSMIDLECNTDISABLE_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_0_COMMADETCT2NDSYNCSMEN_OFFS 15
+#define MV_XPCS_GLOBAL_CFG_0_COMMADETCT2NDSYNCSMEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_0_COMMADETCT2NDSYNCSMEN_OFFS)
+
+/* Global Configuration 1 */
+#define MV_XPCS_GLOBAL_CFG_1_REG (0x4)
+#define MV_XPCS_GLOBAL_CFG_1_MACLOOPBACKEN_OFFS 0
+#define MV_XPCS_GLOBAL_CFG_1_MACLOOPBACKEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_MACLOOPBACKEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_PCSLOOPBACKEN_OFFS 1
+#define MV_XPCS_GLOBAL_CFG_1_PCSLOOPBACKEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_PCSLOOPBACKEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_REPEATERMODEEN_OFFS 2
+#define MV_XPCS_GLOBAL_CFG_1_REPEATERMODEEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_REPEATERMODEEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_LOOPBACKCLKSEL_OFFS 3
+#define MV_XPCS_GLOBAL_CFG_1_LOOPBACKCLKSEL_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_LOOPBACKCLKSEL_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_DESKEWCLKSEL_OFFS 4
+#define MV_XPCS_GLOBAL_CFG_1_DESKEWCLKSEL_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_DESKEWCLKSEL_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_TXSMREPEATERMODE_OFFS 5
+#define MV_XPCS_GLOBAL_CFG_1_TXSMREPEATERMODE_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_TXSMREPEATERMODE_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_RXLOCKBYPASSEN_OFFS 6
+#define MV_XPCS_GLOBAL_CFG_1_RXLOCKBYPASSEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_RXLOCKBYPASSEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_TXLOCKBYPASSEN_OFFS 7
+#define MV_XPCS_GLOBAL_CFG_1_TXLOCKBYPASSEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_TXLOCKBYPASSEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_REMOTEFAULTDIS_OFFS 8
+#define MV_XPCS_GLOBAL_CFG_1_REMOTEFAULTDIS_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_REMOTEFAULTDIS_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_SIGNALDETDOWNLOCALFAULTGENDIS_OFFS 9
+#define MV_XPCS_GLOBAL_CFG_1_SIGNALDETDOWNLOCALFAULTGENDIS_MASK \
+ (0x00000001 << \
+ MV_XPCS_GLOBAL_CFG_1_SIGNALDETDOWNLOCALFAULTGENDIS_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_CJPATGENEN_OFFS 10
+#define MV_XPCS_GLOBAL_CFG_1_CJPATGENEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_CJPATGENEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_CRPATGENEN_OFFS 11
+#define MV_XPCS_GLOBAL_CFG_1_CRPATGENEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_CRPATGENEN_OFFS)
+
+#define MV_XPCS_GLOBAL_CFG_1_CJRFORCEDISPEN_OFFS 12
+#define MV_XPCS_GLOBAL_CFG_1_CJRFORCEDISPEN_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_CFG_1_CJRFORCEDISPEN_OFFS)
+
+/* Global Fifo Threshold Configuration */
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_REG (0x8)
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWTIMEOUTLIMIT_OFFS 0
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWTIMEOUTLIMIT_MASK \
+ (0x0000000f << \
+ MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWTIMEOUTLIMIT_OFFS)
+
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWFIFOWRADDRFIX_OFFS 4
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWFIFOWRADDRFIX_MASK \
+ (0x0000001f << \
+ MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWFIFOWRADDRFIX_OFFS)
+
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWFIFORDTH_OFFS 9
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWFIFORDTH_MASK \
+ (0x0000000f << MV_XPCS_GLOBAL_FIFO_THR_CFG_DESKEWFIFORDTH_OFFS)
+
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_PPMFIFORDTH_OFFS 13
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_PPMFIFORDTH_MASK \
+ (0x00000007 << MV_XPCS_GLOBAL_FIFO_THR_CFG_PPMFIFORDTH_OFFS)
+
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_PPMFIFOEXTRAIDLECHKDIS_OFFS 16
+#define MV_XPCS_GLOBAL_FIFO_THR_CFG_PPMFIFOEXTRAIDLECHKDIS_MASK \
+ (0x00000001 << \
+ MV_XPCS_GLOBAL_FIFO_THR_CFG_PPMFIFOEXTRAIDLECHKDIS_OFFS)
+
+/* Global Max Idle Counter */
+#define MV_XPCS_GLOBAL_MAX_IDLE_CNTR_REG (0xc)
+#define MV_XPCS_GLOBAL_MAX_IDLE_CNTR_MAXIDLECNT_OFFS 0
+#define MV_XPCS_GLOBAL_MAX_IDLE_CNTR_MAXIDLECNT_MASK \
+ (0x0000ffff << MV_XPCS_GLOBAL_MAX_IDLE_CNTR_MAXIDLECNT_OFFS)
+
+/* Global Status */
+#define MV_XPCS_GLOBAL_STATUS_REG (0x10)
+#define MV_XPCS_GLOBAL_STATUS_LINKUP_OFFS 0
+#define MV_XPCS_GLOBAL_STATUS_LINKUP_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_STATUS_LINKUP_OFFS)
+
+#define MV_XPCS_GLOBAL_STATUS_DESKEWACQUIRED_OFFS 1
+#define MV_XPCS_GLOBAL_STATUS_DESKEWACQUIRED_MASK \
+ (0x00000001 << MV_XPCS_GLOBAL_STATUS_DESKEWACQUIRED_OFFS)
+
+/* Global Deskew Error Counter */
+#define MV_XPCS_GLOBAL_DESKEW_ERR_CNTR_REG (0x20)
+#define MV_XPCS_GLOBAL_DESKEW_ERR_CNTR_DESKEWERRCNT_OFFS 0
+#define MV_XPCS_GLOBAL_DESKEW_ERR_CNTR_DESKEWERRCNT_MASK \
+ (0x0000ffff << MV_XPCS_GLOBAL_DESKEW_ERR_CNTR_DESKEWERRCNT_OFFS)
+
+/* Tx Packets Counter LSB */
+#define MV_XPCS_TX_PCKTS_CNTR_LSB_REG (0x30)
+#define MV_XPCS_TX_PCKTS_CNTR_LSB_TXPCKTCNTRLSB_OFFS 0
+#define MV_XPCS_TX_PCKTS_CNTR_LSB_TXPCKTCNTRLSB_MASK \
+ (0x0000ffff << MV_XPCS_TX_PCKTS_CNTR_LSB_TXPCKTCNTRLSB_OFFS)
+
+/* Tx Packets Counter MSB */
+#define MV_XPCS_TX_PCKTS_CNTR_MSB_REG (0x34)
+#define MV_XPCS_TX_PCKTS_CNTR_MSB_TXPCKTCNTRMSB_OFFS 0
+#define MV_XPCS_TX_PCKTS_CNTR_MSB_TXPCKTCNTRMSB_MASK \
+ (0x0000ffff << MV_XPCS_TX_PCKTS_CNTR_MSB_TXPCKTCNTRMSB_OFFS)
+
+/* XPCS per Lane registers */
+
+/* Lane Configuration 0 */
+#define MV_XPCS_LANE_CFG_0_REG (0x50)
+#define MV_XPCS_LANE_CFG_0_TXRESETIND_OFFS 0
+#define MV_XPCS_LANE_CFG_0_TXRESETIND_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_TXRESETIND_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_RXRESETIND_OFFS 1
+#define MV_XPCS_LANE_CFG_0_RXRESETIND_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_RXRESETIND_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_INDIVIDUALLOOPBACK_OFFS 2
+#define MV_XPCS_LANE_CFG_0_INDIVIDUALLOOPBACK_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_INDIVIDUALLOOPBACK_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_INDIVIDUALLINELOOPBACK_OFFS 3
+#define MV_XPCS_LANE_CFG_0_INDIVIDUALLINELOOPBACK_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_INDIVIDUALLINELOOPBACK_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_TXSMBYPASSEN_OFFS 4
+#define MV_XPCS_LANE_CFG_0_TXSMBYPASSEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_TXSMBYPASSEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_RXIDLEGENBYPASSEN_OFFS 5
+#define MV_XPCS_LANE_CFG_0_RXIDLEGENBYPASSEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_RXIDLEGENBYPASSEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_SIGNALDETECTBYPASSEN_OFFS 6
+#define MV_XPCS_LANE_CFG_0_SIGNALDETECTBYPASSEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_SIGNALDETECTBYPASSEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_CJPATCHKEN_OFFS 7
+#define MV_XPCS_LANE_CFG_0_CJPATCHKEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_CJPATCHKEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_CRPATCHKEN_OFFS 8
+#define MV_XPCS_LANE_CFG_0_CRPATCHKEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_CRPATCHKEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_PRBSCHECKEN_OFFS 11
+#define MV_XPCS_LANE_CFG_0_PRBSCHECKEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_PRBSCHECKEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_TESTGENEN_OFFS 12
+#define MV_XPCS_LANE_CFG_0_TESTGENEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_TESTGENEN_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_TESTMODE_OFFS 13
+#define MV_XPCS_LANE_CFG_0_TESTMODE_MASK \
+ (0x00000003 << MV_XPCS_LANE_CFG_0_TESTMODE_OFFS)
+
+#define MV_XPCS_LANE_CFG_0_TESTMODEEN_OFFS 15
+#define MV_XPCS_LANE_CFG_0_TESTMODEEN_MASK \
+ (0x00000001 << MV_XPCS_LANE_CFG_0_TESTMODEEN_OFFS)
+
+/* Lane Configuration 1 */
+#define MV_XPCS_LANE_CFG_1_REG (0x54)
+#define MV_XPCS_LANE_CFG_1_LED0CTRL_OFFS 0
+#define MV_XPCS_LANE_CFG_1_LED0CTRL_MASK \
+ (0x0000000f << MV_XPCS_LANE_CFG_1_LED0CTRL_OFFS)
+
+#define MV_XPCS_LANE_CFG_1_LED1CTRL_OFFS 4
+#define MV_XPCS_LANE_CFG_1_LED1CTRL_MASK \
+ (0x0000000f << MV_XPCS_LANE_CFG_1_LED1CTRL_OFFS)
+
+#define MV_XPCS_LANE_CFG_1_TXSWPSEL_OFFS 8
+#define MV_XPCS_LANE_CFG_1_TXSWPSEL_MASK \
+ (0x00000007 << MV_XPCS_LANE_CFG_1_TXSWPSEL_OFFS)
+
+#define MV_XPCS_LANE_CFG_1_RXSWPSEL_OFFS 11
+#define MV_XPCS_LANE_CFG_1_RXSWPSEL_MASK \
+ (0x00000007 << MV_XPCS_LANE_CFG_1_RXSWPSEL_OFFS)
+
+/* Lane Status */
+#define MV_XPCS_LANE_STATUS_REG (0x5c)
+#define MV_XPCS_LANE_STATUS_PRBSCHECKLOCKED_OFFS 0
+#define MV_XPCS_LANE_STATUS_PRBSCHECKLOCKED_MASK \
+ (0x00000001 << MV_XPCS_LANE_STATUS_PRBSCHECKLOCKED_OFFS)
+
+#define MV_XPCS_LANE_STATUS_PLLLOCKED_OFFS 1
+#define MV_XPCS_LANE_STATUS_PLLLOCKED_MASK \
+ (0x00000001 << MV_XPCS_LANE_STATUS_PLLLOCKED_OFFS)
+
+#define MV_XPCS_LANE_STATUS_SIGNALDETECTED_OFFS 2
+#define MV_XPCS_LANE_STATUS_SIGNALDETECTED_MASK \
+ (0x00000001 << MV_XPCS_LANE_STATUS_SIGNALDETECTED_OFFS)
+
+#define MV_XPCS_LANE_STATUS_COMMADETECTED_OFFS 3
+#define MV_XPCS_LANE_STATUS_COMMADETECTED_MASK \
+ (0x00000001 << MV_XPCS_LANE_STATUS_COMMADETECTED_OFFS)
+
+#define MV_XPCS_LANE_STATUS_SYNCOK_OFFS 4
+#define MV_XPCS_LANE_STATUS_SYNCOK_MASK \
+ (0x00000001 << MV_XPCS_LANE_STATUS_SYNCOK_OFFS)
+
+/* Symbol Error Counter */
+#define MV_XPCS_SYMBOL_ERR_CNTR_REG (0x68)
+#define MV_XPCS_SYMBOL_ERR_CNTR_SYMBOLERRCNT_OFFS 0
+#define MV_XPCS_SYMBOL_ERR_CNTR_SYMBOLERRCNT_MASK \
+ (0x0000ffff << MV_XPCS_SYMBOL_ERR_CNTR_SYMBOLERRCNT_OFFS)
+
+/* Disparity Error Counter */
+#define MV_XPCS_DISPARITY_ERR_CNTR_REG (0x6c)
+#define MV_XPCS_DISPARITY_ERR_CNTR_DISPARITYERRCNT_OFFS 0
+#define MV_XPCS_DISPARITY_ERR_CNTR_DISPARITYERRCNT_MASK \
+ (0x0000ffff << MV_XPCS_DISPARITY_ERR_CNTR_DISPARITYERRCNT_OFFS)
+
+/* Prbs Error Counter */
+#define MV_XPCS_PRBS_ERR_CNTR_REG (0x70)
+#define MV_XPCS_PRBS_ERR_CNTR_PRBSERRCNT_OFFS 0
+#define MV_XPCS_PRBS_ERR_CNTR_PRBSERRCNT_MASK \
+ (0x0000ffff << MV_XPCS_PRBS_ERR_CNTR_PRBSERRCNT_OFFS)
+
+/* Rx Packets Counter LSB */
+#define MV_XPCS_RX_PCKTS_CNTR_LSB_REG (0x74)
+#define MV_XPCS_RX_PCKTS_CNTR_LSB_RXPCKTCNTRLSB_OFFS 0
+#define MV_XPCS_RX_PCKTS_CNTR_LSB_RXPCKTCNTRLSB_MASK \
+ (0x0000ffff << MV_XPCS_RX_PCKTS_CNTR_LSB_RXPCKTCNTRLSB_OFFS)
+
+/* Rx Packets Counter MSB */
+#define MV_XPCS_RX_PCKTS_CNTR_MSB_REG (0x78)
+#define MV_XPCS_RX_PCKTS_CNTR_MSB_RXPCKTCNTRMSB_OFFS 0
+#define MV_XPCS_RX_PCKTS_CNTR_MSB_RXPCKTCNTRMSB_MASK \
+ (0x0000ffff << MV_XPCS_RX_PCKTS_CNTR_MSB_RXPCKTCNTRMSB_OFFS)
+
+/* Rx Bad Packets Counter LSB */
+#define MV_XPCS_RX_BAD_PCKTS_CNTR_LSB_REG (0x7c)
+#define MV_XPCS_RX_BAD_PCKTS_CNTR_LSB_RXBADPCKTCNTRLSB_OFFS 0
+#define MV_XPCS_RX_BAD_PCKTS_CNTR_LSB_RXBADPCKTCNTRLSB_MASK \
+ (0x0000ffff << \
+ MV_XPCS_RX_BAD_PCKTS_CNTR_LSB_RXBADPCKTCNTRLSB_OFFS)
+
+/* Rx Bad Packets Counter MSB */
+#define MV_XPCS_RX_BAD_PCKTS_CNTR_MSB_REG (0x80)
+#define MV_XPCS_RX_BAD_PCKTS_CNTR_MSB_RXBADPCKTCNTRMSB_OFFS 0
+#define MV_XPCS_RX_BAD_PCKTS_CNTR_MSB_RXBADPCKTCNTRMSB_MASK \
+ (0x0000ffff << \
+ MV_XPCS_RX_BAD_PCKTS_CNTR_MSB_RXBADPCKTCNTRMSB_OFFS)
+
+/* Cyclic Data 0 */
+#define MV_XPCS_CYCLIC_DATA_0_REG (0x84)
+#define MV_XPCS_CYCLIC_DATA_0_CYCLICDATA0_OFFS 0
+#define MV_XPCS_CYCLIC_DATA_0_CYCLICDATA0_MASK \
+ (0x000003ff << MV_XPCS_CYCLIC_DATA_0_CYCLICDATA0_OFFS)
+
+/* Cyclic Data 1 */
+#define MV_XPCS_CYCLIC_DATA_1_REG (0x88)
+#define MV_XPCS_CYCLIC_DATA_1_CYCLICDATA1_OFFS 0
+#define MV_XPCS_CYCLIC_DATA_1_CYCLICDATA1_MASK \
+ (0x000003ff << MV_XPCS_CYCLIC_DATA_1_CYCLICDATA1_OFFS)
+
+/* Cyclic Data 2 */
+#define MV_XPCS_CYCLIC_DATA_2_REG (0x8c)
+#define MV_XPCS_CYCLIC_DATA_2_CYCLICDATA2_OFFS 0
+#define MV_XPCS_CYCLIC_DATA_2_CYCLICDATA2_MASK \
+ (0x000003ff << MV_XPCS_CYCLIC_DATA_2_CYCLICDATA2_OFFS)
+
+/* Cyclic Data 3 */
+#define MV_XPCS_CYCLIC_DATA_3_REG (0x90)
+#define MV_XPCS_CYCLIC_DATA_3_CYCLICDATA3_OFFS 0
+#define MV_XPCS_CYCLIC_DATA_3_CYCLICDATA3_MASK \
+ (0x000003ff << MV_XPCS_CYCLIC_DATA_3_CYCLICDATA3_OFFS)
+
+/*************/
+/*SERDES REGS */
+/*************/
+
+#define MV_SERDES_CFG_0_REG (0x00)
+
+#define MV_SERDES_CFG_0_PU_PLL_OFFS 1
+#define MV_SERDES_CFG_0_PU_PLL_MASK (0x00000001 << \
+ MV_SERDES_CFG_0_PU_PLL_OFFS)
+#define MV_SERDES_CFG_0_RX_PLL_OFFS 11
+#define MV_SERDES_CFG_0_RX_PLL_MASK (0x00000001 << \
+ MV_SERDES_CFG_0_RX_PLL_OFFS)
+#define MV_SERDES_CFG_0_TX_PLL_OFFS 12
+#define MV_SERDES_CFG_0_TX_PLL_MASK (0x00000001 << \
+ MV_SERDES_CFG_0_TX_PLL_OFFS)
+#define MV_SERDES_CFG_0_MEDIA_MODE_OFFS 15
+#define MV_SERDES_CFG_0_MEDIA_MODE_MASK (0x00000001 << \
+ MV_SERDES_CFG_0_MEDIA_MODE_OFFS)
+
+#define MV_SERDES_CFG_1_REG (0x04)
+#define MV_SERDES_CFG_1_ANALOG_RESET_OFFS 3
+#define MV_SERDES_CFG_1_ANALOG_RESET_MASK \
+ (0x00000001 << MV_SERDES_CFG_1_ANALOG_RESET_OFFS)
+
+#define MV_SERDES_CFG_1_CORE_RESET_OFFS 5
+#define MV_SERDES_CFG_1_CORE_RESET_MASK \
+ (0x00000001 << MV_SERDES_CFG_1_CORE_RESET_OFFS)
+
+#define MV_SERDES_CFG_1_DIGITAL_RESET_OFFS 6
+#define MV_SERDES_CFG_1_DIGITAL_RESET_MASK \
+ (0x00000001 << MV_SERDES_CFG_1_DIGITAL_RESET_OFFS)
+
+#define MV_SERDES_CFG_1_TX_SYNC_EN_OFFS 7
+#define MV_SERDES_CFG_1_TX_SYNC_EN_MASK \
+ (0x00000001 << MV_SERDES_CFG_1_TX_SYNC_EN_OFFS)
+
+#define MV_SERDES_CFG_2_REG (0x08)
+#define MV_SERDES_CFG_3_REG (0x0c)
+#define MV_SERDES_MISC_REG (0x14)
+
+/*************/
+/*SMI REGS */
+/*************/
+
+#define MV_SMI_MANAGEMENT_BUSY_OFFS 28
+#define MV_SMI_MANAGEMENT_BUSY_MASK \
+ (0x1 << MV_SMI_MANAGEMENT_BUSY_OFFS)
+#define MV_SMI_MANAGEMENT_READ_VALID_OFFS 27
+#define MV_SMI_MANAGEMENT_READ_VALID_MASK \
+ (0x1 << MV_SMI_MANAGEMENT_READ_VALID_OFFS)
+#define MV_SMI_MANAGEMENT_OPCODE_OFFS 26
+#define MV_SMI_MANAGEMENT_OPCODE_MASK \
+ (0x1 << MV_SMI_MANAGEMENT_OPCODE_OFFS)
+#define MV_SMI_MANAGEMENT_REGAD_OFFS 21
+#define MV_SMI_MANAGEMENT_REGAD_MASK \
+ (0x1F << MV_SMI_MANAGEMENT_REGAD_OFFS)
+#define MV_SMI_MANAGEMENT_PHYAD_OFFS 16
+#define MV_SMI_MANAGEMENT_PHYAD_MASK \
+ (0x1F << MV_SMI_MANAGEMENT_PHYAD_OFFS)
+#define MV_SMI_MANAGEMENT_DATA_OFFS 0
+#define MV_SMI_MANAGEMENT_DATA_MASK \
+ (0xFFFF << MV_SMI_MANAGEMENT_DATA_OFFS)
+
+/* SMI_MISC_CFG Register */
+#define MV_SMI_MISC_CFG_REG (0x4)
+
+#define MV_SMI_MISC_CFG_SMI_ACCELERATE_OFFS 0
+#define MV_SMI_MISC_CFG_SMI_ACCELERATE_MASK \
+ (0x1 << MV_SMI_MISC_CFG_SMI_ACCELERATE_OFFS)
+#define MV_SMI_MISC_CFG_SMI_FASTMDC_OFFS 1
+#define MV_SMI_MISC_CFG_SMI_FASTMDC_MASK \
+ (0x1 << MV_SMI_MISC_CFG_SMI_FASTMDC_OFFS)
+#define MV_SMI_MISC_CFG_FAST_MDC_DIVISION_SELECTOR_OFFS 2
+#define MV_SMI_MISC_CFG_FAST_MDC_DIVISION_SELECTOR_MASK \
+ (0x3 << MV_SMI_MISC_CFG_FAST_MDC_DIVISION_SELECTOR_OFFS)
+#define MV_SMI_MISC_CFG_ENABLE_MDIO_OUT_LATENCY_OFFS 4
+#define MV_SMI_MISC_CFG_ENABLE_MDIO_OUT_LATENCY_MASK \
+ (0x1 << MV_SMI_MISC_CFG_ENABLE_MDIO_OUT_LATENCY_OFFS)
+#define MV_SMI_MISC_CFG_AUTOPOLLNUMOFPORTS_OFFS 5
+#define MV_SMI_MISC_CFG_AUTOPOLLNUMOFPORTS_MASK \
+ (0x1F << MV_SMI_MISC_CFG_AUTOPOLLNUMOFPORTS_OFFS)
+#define MV_SMI_MISC_CFG_ENABLE_POLLING_OFFS 10
+#define MV_SMI_MISC_CFG_ENABLE_POLLING_MASK \
+ (0x1 << MV_SMI_MISC_CFG_ENABLE_POLLING_OFFS)
+#define MV_SMI_MISC_CFG_INVERT_MDC_OFFS 11
+#define MV_SMI_MISC_CFG_INVERT_MDC_MASK \
+ (0x1 << MV_SMI_MISC_CFG_INVERT_MDC_OFFS)
+
+/* PHY_AN_CFG Register */
+#define MV_SMI_PHY_AN_CFG_REG (0x8)
+
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT0_OFFS 0
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT0_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT0_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT1_OFFS 1
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT1_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT1_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT2_OFFS 2
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT2_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT2_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT3_OFFS 3
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT3_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT3_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT4_OFFS 4
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT4_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT4_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT5_OFFS 5
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT5_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT5_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT6_OFFS 6
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT6_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT6_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT7_OFFS 7
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT7_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT7_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT8_OFFS 8
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT8_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT8_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT9_OFFS 9
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT9_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT9_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT10_OFFS 10
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT10_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT10_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT11_OFFS 11
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT11_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT11_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT12_OFFS 12
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT12_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT12_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT13_OFFS 13
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT13_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT13_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT14_OFFS 14
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT14_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT14_OFFS)
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT15_OFFS 15
+#define MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT15_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_AUTOMEDIA_SELECTEN_PORT15_OFFS)
+#define MV_SMI_PHY_AN_CFG_SKIPSWRESET_SMI_OFFS 16
+#define MV_SMI_PHY_AN_CFG_SKIPSWRESET_SMI_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_SKIPSWRESET_SMI_OFFS)
+#define MV_SMI_PHY_AN_CFG_STOP_AUTONEGSMI_OFFS 17
+#define MV_SMI_PHY_AN_CFG_STOP_AUTONEGSMI_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_STOP_AUTONEGSMI_OFFS)
+#define MV_SMI_PHY_AN_CFG_MASTERSMI_OFFS 18
+#define MV_SMI_PHY_AN_CFG_MASTERSMI_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_MASTERSMI_OFFS)
+#define MV_SMI_PHY_AN_CFG_SGMIIINBANDFCEN_OFFS 19
+#define MV_SMI_PHY_AN_CFG_SGMIIINBANDFCEN_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_SGMIIINBANDFCEN_OFFS)
+#define MV_SMI_PHY_AN_CFG_FCADVSETFIBER_OFFS 20
+#define MV_SMI_PHY_AN_CFG_FCADVSETFIBER_MASK \
+ (0x1 << MV_SMI_PHY_AN_CFG_FCADVSETFIBER_OFFS)
+
+/* PHY_ADDRESS_REGISTER0 Register: per-port PHY address, stride 4 bytes
+ * from base 0xC. Argument is parenthesized so expression arguments
+ * expand correctly.
+ */
+#define MV_SMI_PHY_ADDRESS_REG(n) (0xC + 0x4 * (n))
+#define MV_SMI_PHY_ADDRESS_PHYAD_OFFS 0
+#define MV_SMI_PHY_ADDRESS_PHYAD_MASK \
+ (0x1F << MV_SMI_PHY_ADDRESS_PHYAD_OFFS)
+
+/*************/
+/* MIB REGS */
+/*************/
+
+/* GMAC_MIB Counters register definitions */
+#define MV_MIB_GOOD_OCTETS_RECEIVED_LOW 0x0
+#define MV_MIB_GOOD_OCTETS_RECEIVED_HIGH 0x4
+#define MV_MIB_BAD_OCTETS_RECEIVED 0x8
+#define MV_MIB_CRC_ERRORS_SENT 0xc
+#define MV_MIB_UNICAST_FRAMES_RECEIVED 0x10
+/* Reserved 0x14 */
+#define MV_MIB_BROADCAST_FRAMES_RECEIVED 0x18
+#define MV_MIB_MULTICAST_FRAMES_RECEIVED 0x1c
+#define MV_MIB_FRAMES_64_OCTETS 0x20
+#define MV_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define MV_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define MV_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define MV_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define MV_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define MV_MIB_GOOD_OCTETS_SENT_LOW 0x38
+#define MV_MIB_GOOD_OCTETS_SENT_HIGH 0x3c
+#define MV_MIB_UNICAST_FRAMES_SENT 0x40
+/* Reserved 0x44 */
+#define MV_MIB_MULTICAST_FRAMES_SENT 0x48
+#define MV_MIB_BROADCAST_FRAMES_SENT 0x4c
+/* Reserved 0x50 */
+#define MV_MIB_FC_SENT 0x54
+#define MV_MIB_FC_RECEIVED 0x58
+#define MV_MIB_RX_FIFO_OVERRUN 0x5c
+#define MV_MIB_UNDERSIZE_RECEIVED 0x60
+#define MV_MIB_FRAGMENTS_RECEIVED 0x64
+#define MV_MIB_OVERSIZE_RECEIVED 0x68
+#define MV_MIB_JABBER_RECEIVED 0x6c
+#define MV_MIB_MAC_RECEIVE_ERROR 0x70
+#define MV_MIB_BAD_CRC_EVENT 0x74
+#define MV_MIB_COLLISION 0x78
+#define MV_MIB_LATE_COLLISION 0x7c
+
+/******************************************************************************/
+/* System Soft Reset 1 */
+#define MV_GOP_SOFT_RESET_1_REG 0x8
+
+#define NETC_GOP_SOFT_RESET_OFFSET 6
+#define NETC_GOP_SOFT_RESET_MASK (0x1 << NETC_GOP_SOFT_RESET_OFFSET)
+
+/* Ports Control 0 */
+#define MV_NETCOMP_PORTS_CONTROL_0 (0x10)
+
+#define NETC_CLK_DIV_PHASE_OFFSET 31
+#define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFSET)
+
+#define NETC_GIG_RX_DATA_SAMPLE_OFFSET 29
+#define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << NETC_GIG_RX_DATA_SAMPLE_OFFSET)
+
+#define NETC_PORT3_PAUSE_OFFSET 6
+#define NETC_PORT3_PAUSE_MASK (0x1 << NETC_PORT3_PAUSE_OFFSET)
+
+#define NETC_PORT2_PAUSE_OFFSET 5
+#define NETC_PORT2_PAUSE_MASK (0x1 << NETC_PORT2_PAUSE_OFFSET)
+
+#define NETC_PORT1_PAUSE_OFFSET 4
+#define NETC_PORT1_PAUSE_MASK (0x1 << NETC_PORT1_PAUSE_OFFSET)
+
+#define NETC_PORT0_PAUSE_OFFSET 3
+#define NETC_PORT0_PAUSE_MASK (0x1 << NETC_PORT0_PAUSE_OFFSET)
+
+#define NETC_BUS_WIDTH_SELECT_OFFSET 1
+#define NETC_BUS_WIDTH_SELECT_MASK (0x1 << NETC_BUS_WIDTH_SELECT_OFFSET)
+
+#define NETC_GOP_ENABLE_OFFSET 0
+#define NETC_GOP_ENABLE_MASK (0x1 << NETC_GOP_ENABLE_OFFSET)
+
+/* Ports Control 1 */
+#define MV_NETCOMP_PORTS_CONTROL_1 (0x14)
+
+/* Per-port fields; arguments are parenthesized so expression
+ * arguments expand correctly.
+ */
+#define NETC_PORT_GIG_RF_RESET_OFFSET(port) (28 + (port))
+#define NETC_PORT_GIG_RF_RESET_MASK(port) \
+ (0x1 << NETC_PORT_GIG_RF_RESET_OFFSET(port))
+
+#define NETC_PORTS_ACTIVE_OFFSET(port) (0 + (port))
+#define NETC_PORTS_ACTIVE_MASK(port) (0x1 << NETC_PORTS_ACTIVE_OFFSET(port))
+
+/* Ports Status; per-port field argument parenthesized so expression
+ * arguments expand correctly. */
+#define MV_NETCOMP_PORTS_STATUS (0x1C)
+#define NETC_PORTS_STATUS_OFFSET(port) (0 + (port))
+#define NETC_PORTS_STATUS_MASK(port) (0x1 << NETC_PORTS_STATUS_OFFSET(port))
+
+/* Networking Complex Control 0 */
+#define MV_NETCOMP_CONTROL_0 (0x20)
+
+#define NETC_GBE_PORT1_MII_MODE_OFFSET 2
+#define NETC_GBE_PORT1_MII_MODE_MASK \
+ (0x1 << NETC_GBE_PORT1_MII_MODE_OFFSET)
+
+#define NETC_GBE_PORT1_SGMII_MODE_OFFSET 1
+#define NETC_GBE_PORT1_SGMII_MODE_MASK \
+ (0x1 << NETC_GBE_PORT1_SGMII_MODE_OFFSET)
+
+#define NETC_GBE_PORT0_SGMII_MODE_OFFSET 0
+#define NETC_GBE_PORT0_SGMII_MODE_MASK \
+ (0x1 << NETC_GBE_PORT0_SGMII_MODE_OFFSET)
+
+/* SD1 Control1 */
+#define SD1_CONTROL_1_REG (0x148)
+
+#define SD1_CONTROL_XAUI_EN_OFFSET 28
+#define SD1_CONTROL_XAUI_EN_MASK (0x1 << SD1_CONTROL_XAUI_EN_OFFSET)
+
+#define SD1_CONTROL_RXAUI0_L23_EN_OFFSET 27
+#define SD1_CONTROL_RXAUI0_L23_EN_MASK (0x1 << \
+ SD1_CONTROL_RXAUI0_L23_EN_OFFSET)
+
+#define SD1_CONTROL_RXAUI1_L45_EN_OFFSET 26
+#define SD1_CONTROL_RXAUI1_L45_EN_MASK (0x1 << \
+ SD1_CONTROL_RXAUI1_L45_EN_OFFSET)
+
+/******************************************************************************/
+
+#define PCS40G_COMMON_CONTROL (0x014)
+
+#define FORWARD_ERROR_CORRECTION_OFFSET 10
+#define FORWARD_ERROR_CORRECTION_MASK (0x1 << FORWARD_ERROR_CORRECTION_OFFSET)
+
+#define PCS_CLOCK_RESET (0x14C)
+
+#define CLK_DIV_PHASE_SET_OFFSET 11
+#define CLK_DIV_PHASE_SET_MASK (0x1 << CLK_DIV_PHASE_SET_OFFSET)
+
+#define CLK_DIVISION_RATIO_OFFSET 4
+#define CLK_DIVISION_RATIO_MASK (0x7 << CLK_DIVISION_RATIO_OFFSET)
+
+#define MAC_CLK_RESET_OFFSET 2
+#define MAC_CLK_RESET_MASK (0x1 << MAC_CLK_RESET_OFFSET)
+
+#define RX_SD_CLK_RESET_OFFSET 1
+#define RX_SD_CLK_RESET_MASK (0x1 << RX_SD_CLK_RESET_OFFSET)
+
+#define TX_SD_CLK_RESET_OFFSET 0
+#define TX_SD_CLK_RESET_MASK (0x1 << TX_SD_CLK_RESET_OFFSET)
+
+/***********/
+/*FCA REGS */
+/***********/
+
+/* FCA control */
+#define FCA_CONTROL_REG (0x000)
+#define FCA_RESET_OFFSET 0
+#define FCA_RESET_MASK (0x1 << FCA_RESET_OFFSET)
+
+#define FCA_BYPASS_OFFSET 1
+#define FCA_BYPASS_MASK (0x1 << FCA_BYPASS_OFFSET)
+
+#define FCA_PORT_TYPE_OFFSET 4
+#define FCA_PORT_TYPE_MASK (0x7 << FCA_PORT_TYPE_OFFSET) /* 3-bit field, bits 6:4; was 0x111 (binary-literal typo) which would clobber bits 8 and 12 */
+
+#define FCA_SEND_PERIODIC_OFFSET 7
+#define FCA_SEND_PERIODIC_MASK (0x1 << FCA_SEND_PERIODIC_OFFSET)
+
+#define FCA_ENABLE_PERIODIC_OFFSET 11
+#define FCA_ENABLE_PERIODIC_MASK (0x1 << FCA_ENABLE_PERIODIC_OFFSET)
+
+/* FCA periodic timer */
+#define PERIODIC_COUNTER_LSB_REG (0x110)
+#define PERIODIC_COUNTER_MSB_REG (0x114)
+
+#define FCA_PORT_TYPE_A 0x0
+#define FCA_PORT_TYPE_B 0x1
+#define FCA_PORT_TYPE_C 0x2
+#define FCA_PORT_TYPE_D 0x3
+#define FCA_PORT_TYPE_E 0x4
+
+#endif /*_MV_GOP_HW_TYPE_H_*/
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h
new file mode 100644
index 000000000000..fba29f1849f4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x.h
@@ -0,0 +1,834 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#ifndef _MVPP2_H_
+#define _MVPP2_H_
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+
+#include "mv_pp2x_hw_type.h"
+#include "mv_gop110_hw_type.h"
+
+#define MVPP2_DRIVER_NAME "mvpp2x"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+#define PFX MVPP2_DRIVER_NAME ": "
+
+#define IRQ_NAME_SIZE (36)
+
+#define STATS_DELAY 250
+
+#define TSO_TXQ_LIMIT 100
+#define TXQ_LIMIT (MAX_SKB_FRAGS + 2)
+
+#define MV_ETH_SKB_SHINFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* START - Taken from mvPp2Commn.h, need to order TODO */
+/*--------------------------------------------------------------------*/
+/* PP2 COMMON DEFINETIONS */
+/*--------------------------------------------------------------------*/
+
+#define MV_ERROR (-1)
+#define MV_OK (0)
+
+#define WAY_MAX 1
+
+/*--------------------------------------------------------------------*/
+/* PP2 COMMON DEFINETIONS */
+/*--------------------------------------------------------------------*/
+#define NOT_IN_USE (-1)
+#define IN_USE (1)
+#define BYTE_BITS 8
+#define BYTE_MASK 0xFF
+#define DWORD_BITS_LEN 32
+#define DWORD_BYTES_LEN 4
+#define RETRIES_EXCEEDED 15000
+#define ONE_BIT_MAX 1
+#define UNI_MAX 7
+#define ETH_PORTS_NUM 7
+
+/*--------------------------------------------------------------------*/
+/* PNC COMMON DEFINETIONS */
+/*--------------------------------------------------------------------*/
+
+/* HW_BYTE_OFFS
+ * return HW byte offset in 4 bytes register
+ * _offs_: native offset (LE)
+ * LE example: HW_BYTE_OFFS(1) = 1
+ * BE example: HW_BYTE_OFFS(1) = 2
+ */
+
+#if defined(__LITTLE_ENDIAN)
+#define HW_BYTE_OFFS(_offs_) (_offs_)
+#else
+#define HW_BYTE_OFFS(_offs_) ((3 - ((_offs_) % 4)) + (((_offs_) / 4) * 4))
+#endif
+
+#define SRAM_BIT_TO_BYTE(_bit_) HW_BYTE_OFFS((_bit_) / 8)
+
+#define TCAM_DATA_BYTE_OFFS_LE(_offs_) (((_offs_) - \
+ ((_offs_) % 2)) * 2 + ((_offs_) % 2))
+#define TCAM_DATA_MASK_OFFS_LE(_offs_) (((_offs_) * 2) - ((_offs_) % 2) + 2)
+
+/* TCAM_DATA_BYTE/MASK
+ * tcam data devide into 4 bytes registers
+ * each register include 2 bytes of data and 2 bytes of mask
+ * the next macros calc data/mask offset in 4 bytes register
+ * _offs_: native offset (LE) in data bytes array
+ * relevant only for TCAM data bytes
+ * used by PRS and CLS2
+ */
+#define TCAM_DATA_BYTE(_offs_) (HW_BYTE_OFFS(TCAM_DATA_BYTE_OFFS_LE(_offs_)))
+#define TCAM_DATA_MASK(_offs_) (HW_BYTE_OFFS(TCAM_DATA_MASK_OFFS_LE(_offs_)))
+
+/*END - Taken from mvPp2Commn.h, need to order TODO */
+/*--------------------------------------------------------------------*/
+
+#define __FILENAME__ (strrchr(__FILE__, '/') ? \
+ strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#ifdef MVPP2_VERBOSE
+#define MVPP2_PRINT_LINE() \
+ pr_info("Passed: %s(%d)\n", __func__, __LINE__)
+
+#define MVPP2_PRINT_VAR(var) \
+ pr_info("%s (%d): " #var "=0x%lx\n", __func__, __LINE__, (u64)var)
+#define MVPP2_PRINT_VAR_NAME(var, name) \
+ pr_info("%s (%d): %s = 0x%lx\n", __func__, __LINE__, name, var)
+#else
+#define MVPP2_PRINT_LINE()
+#define MVPP2_PRINT_VAR(var)
+#define MVPP2_PRINT_VAR_NAME(var, name)
+#endif
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+ (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+#define MVPP2_QUEUE_DESC_PTR(q, index) \
+ ((q)->first_desc + index)
+
+/* Various constants */
+#define MVPP2_MAX_SW_THREADS 4
+#define MVPP2_MAX_CPUS 4
+#define MVPP2_MAX_SHARED 1
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS 64
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_TXDONE_COAL_USEC 1000
+
+#define MVPP2_RX_COAL_PKTS 32
+#define MVPP2_RX_COAL_USEC 64
+
+/* BM constants */
+#define MVPP2_BM_POOLS_NUM 16
+#define MVPP2_BM_POOLS_MAX_ALLOC_NUM 4 /* Max num of allowed BM pools
+ * allocations
+ */
+#define MVPP2_BM_POOL_SIZE_MAX (16 * 1024 - \
+ MVPP2_BM_POOL_PTR_ALIGN / 4)
+#define MVPP2_BM_POOL_PTR_ALIGN 128
+
+#define MVPP2_BM_SHORT_BUF_NUM 2048
+#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_JUMBO_BUF_NUM 512
+
+#define MVPP2_ALL_BUFS 0
+
+#define RX_TOTAL_SIZE(buf_size) ((buf_size) + MV_ETH_SKB_SHINFO_SIZE)
+#define RX_TRUE_SIZE(total_size) roundup_pow_of_two(total_size)
+extern u32 debug_param;
+
+/* Convert cpu_id to sw_thread_id */
+#define QV_THR_2_CPU(sw_thread_id) (sw_thread_id - first_addr_space)
+#define QV_CPU_2_THR(cpu_id) (first_addr_space + cpu_id)
+
+/* TX FIFO constants */
+#define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa
+#define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3
+
+#define MVPP2_TX_FIFO_MINIMUM_THRESHOLD 256
+#define MVPP2_TX_FIFO_THRESHOLD_10KB (MVPP2_TX_FIFO_DATA_SIZE_10KB * 1024 - \
+ MVPP2_TX_FIFO_MINIMUM_THRESHOLD)
+#define MVPP2_TX_FIFO_THRESHOLD_3KB (MVPP2_TX_FIFO_DATA_SIZE_3KB * 1024 - \
+ MVPP2_TX_FIFO_MINIMUM_THRESHOLD)
+
+/* Used for define type of data saved in shadow: SKB or extended buffer or nothing */
+#define MVPP2_ETH_SHADOW_SKB 0x1
+#define MVPP2_ETH_SHADOW_EXT 0x2
+
+#define MVPP2_EXTRA_BUF_SIZE 120
+#define MVPP2_EXTRA_BUF_NUM (MVPP2_MAX_TXD * MVPP2_MAX_TXQ)
+
+enum mvppv2_version {
+ PPV21 = 21,
+ PPV22
+};
+
+enum mv_pp2x_queue_vector_type {
+ MVPP2_SHARED,
+ MVPP2_PRIVATE
+};
+
+enum mv_pp2x_queue_distribution_mode {
+ /* All queues are shared.
+ * PPv2.1: this is the only supported mode.
+ * PPv2.2: Requires (N+1) interrupts. All rx_queues are
+ * configured on the additional interrupt.
+ */
+ MVPP2_QDIST_SINGLE_MODE,
+ MVPP2_QDIST_MULTI_MODE /* PPv2.2 only requires N interrupts */
+};
+
+enum mv_pp2x_cos_classifier {
+ MVPP2_COS_CLS_VLAN, /* CoS based on VLAN pri */
+ MVPP2_COS_CLS_DSCP,
+ MVPP2_COS_CLS_VLAN_DSCP, /* CoS based on VLAN pri, */
+ /*if untagged and IP, then based on DSCP */
+ MVPP2_COS_CLS_DSCP_VLAN
+};
+
+enum mv_pp2x_rss_nf_udp_mode {
+ MVPP2_RSS_NF_UDP_2T, /* non-frag UDP packet hash value
+ * is calculated based on 2T
+ */
+ MVPP2_RSS_NF_UDP_5T /* non-frag UDP packet hash value
+ *is calculated based on 5T
+ */
+};
+
+struct gop_stat {
+ u64 rx_byte;
+ u64 rx_unicast;
+ u64 rx_mcast;
+ u64 rx_bcast;
+ u64 rx_frames;
+ u64 rx_pause;
+ u64 rx_overrun;
+ u64 rx_crc;
+ u64 rx_runt;
+ u64 rx_giant;
+ u64 rx_fragments_err;
+ u64 rx_mac_err;
+ u64 rx_jabber;
+ u64 rx_total_err;
+ u64 tx_byte;
+ u64 tx_unicast;
+ u64 tx_mcast;
+ u64 tx_bcast;
+ u64 tx_frames;
+ u64 tx_pause;
+ u64 tx_crc_sent;
+ u64 collision;
+ u64 late_collision;
+};
+
+struct mv_mac_data {
+ u8 gop_index;
+ struct gop_stat gop_statistics;
+ u64 flags;
+ /* Whether a PHY is present, and if yes, at which address. */
+ int phy_addr;
+ phy_interface_t phy_mode; /* RXAUI, SGMII, etc. */
+ struct phy_device *phy_dev;
+ struct device_node *phy_node;
+ int link_irq;
+ char irq_name[IRQ_NAME_SIZE];
+ bool force_link;
+ u32 autoneg;
+ u32 link;
+ u32 duplex;
+ u32 speed;
+};
+
+/* Masks used for pp3_emac flags */
+#define MV_EMAC_F_LINK_UP_BIT 0
+#define MV_EMAC_F_INIT_BIT 1
+#define MV_EMAC_F_SGMII2_5_BIT 2
+#define MV_EMAC_F_PORT_UP_BIT 3
+
+#define MV_EMAC_F_LINK_UP BIT(MV_EMAC_F_LINK_UP_BIT)
+#define MV_EMAC_F_INIT BIT(MV_EMAC_F_INIT_BIT)
+#define MV_EMAC_F_SGMII2_5 BIT(MV_EMAC_F_SGMII2_5_BIT)
+#define MV_EMAC_F_PORT_UP BIT(MV_EMAC_F_PORT_UP_BIT)
+
+#define MVPP2_NO_LINK_IRQ 0
+
+/* Per-CPU Tx queue control */
+struct mv_pp2x_txq_pcpu {
+ int cpu;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used Tx DMA descriptor in the
+ * descriptor ring
+ */
+ int count;
+
+ /* Number of Tx DMA descriptors reserved for each CPU */
+ int reserved_num;
+
+ /* Array of transmitted skb */
+ struct sk_buff **tx_skb;
+
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
+ int *data_size;
+
+ /* Index of last TX DMA descriptor that was inserted */
+ int txq_put_index;
+
+ /* Index of the TX DMA descriptor to be cleaned up */
+ int txq_get_index;
+};
+
+struct mv_pp2x_tx_queue {
+ /* Physical number of this Tx queue */
+ u8 id;
+
+ /* Logical number of this Tx queue */
+ u8 log_id;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Per-CPU control of physical Tx queues */
+ struct mv_pp2x_txq_pcpu __percpu *pcpu;
+
+ u32 pkts_coal;
+
+ /* Virtual pointer to address of the Tx DMA descriptors
+ * memory_allocation
+ */
+ void *desc_mem;
+
+ /* Virtual address of thex Tx DMA descriptors array */
+ struct mv_pp2x_tx_desc *first_desc;
+
+ /* DMA address of the Tx DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last Tx DMA descriptor */
+ int last_desc;
+
+ /* Index of the next Tx DMA descriptor to process */
+ int next_desc_to_proc;
+};
+
+struct mv_pp2x_aggr_tx_queue {
+ /* Physical number of this Tx queue */
+ u8 id;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ int size;
+
+ /* Number of currently used Tx DMA descriptor in the descriptor ring */
+ int count;
+
+ /* Virtual pointer to address of the Aggr_Tx DMA descriptors
+ * memory_allocation
+ */
+ void *desc_mem;
+
+ /* Virtual pointer to address of the Aggr_Tx DMA descriptors array */
+ struct mv_pp2x_tx_desc *first_desc;
+
+ /* DMA address of the Tx DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last Tx DMA descriptor */
+ int last_desc;
+
+ /* Index of the next Tx DMA descriptor to process */
+ int next_desc_to_proc;
+
+ /* Used to statistic the desc number to xmit in bulk */
+ u32 xmit_bulk;
+};
+
+struct mv_pp2x_rx_queue {
+ /* RX queue number, in the range 0-31 for physical RXQs */
+ u8 id;
+
+ /* Port's logic RXQ number to which physical RXQ is mapped */
+ int log_id;
+
+ /* Num of rx descriptors in the rx descriptor ring */
+ int size;
+
+ u32 pkts_coal;
+ u32 time_coal;
+
+ /* Virtual pointer to address of the Rx DMA descriptors
+ * memory_allocation
+ */
+ void *desc_mem;
+
+ /* Virtual address of the RX DMA descriptors array */
+ struct mv_pp2x_rx_desc *first_desc;
+
+ /* DMA address of the RX DMA descriptors array */
+ dma_addr_t descs_phys;
+
+ /* Index of the last RX DMA descriptor */
+ int last_desc;
+
+ /* Index of the next RX DMA descriptor to process */
+ int next_desc_to_proc;
+
+ /* ID of port to which physical RXQ is mapped */
+ int port;
+
+};
+
+struct avanta_lp_gop_hw {
+ void __iomem *lms_base;
+};
+
+struct mv_mac_unit_desc {
+ void __iomem *base;
+ u32 obj_size;
+};
+
+struct cpn110_gop_hw {
+ struct mv_mac_unit_desc gmac;
+ struct mv_mac_unit_desc xlg_mac;
+ struct mv_mac_unit_desc xmib;
+ struct mv_mac_unit_desc tai;
+ struct mv_mac_unit_desc ptp;
+ struct mv_mac_unit_desc fca;
+ void __iomem *smi_base;
+ void __iomem *xsmi_base;
+ void __iomem *mspg_base;
+ void __iomem *xpcs_base;
+ void __iomem *rfu1_base;
+
+};
+
+struct gop_hw {
+ union {
+ struct avanta_lp_gop_hw gop_alp;
+ struct cpn110_gop_hw gop_110;
+ };
+};
+
+struct mv_pp2x_hw {
+ /* Shared registers' base addresses */
+ void __iomem *base; /* PPV22 base_address as received in
+ *devm_ioremap_resource().
+ */
+ void __iomem *lms_base;
+ void __iomem *cpu_base[MVPP2_MAX_CPUS];
+
+ phys_addr_t phys_addr_start;
+ phys_addr_t phys_addr_end;
+
+ struct gop_hw gop;
+ /* ppv22_base_address for each CPU.
+ * PPv2.2 - cpu_base[x] = base +
+ * cpu_index[smp_processor_id]*MV_PP2_SPACE_64K,
+ * for non-participating CPU it is NULL.
+ * PPv2.1 cpu_base[x] = base
+ */
+ /* Common clocks */
+ struct clk *pp_clk;
+ struct clk *gop_clk;
+ struct clk *gop_core_clk;
+ struct clk *mg_clk;
+ struct clk *mg_core_clk;
+
+ u32 tclk;
+
+ /* PRS shadow table */
+ struct mv_pp2x_prs_shadow *prs_shadow;
+ /* PRS auxiliary table for double vlan entries control */
+ bool *prs_double_vlans;
+ /* CLS shadow info for update in running time */
+ struct mv_pp2x_cls_shadow *cls_shadow;
+ /* C2 shadow info */
+ struct mv_pp2x_c2_shadow *c2_shadow;
+};
+
+struct mv_pp2x_cos {
+ u8 cos_classifier; /* CoS based on VLAN or DSCP */
+ u8 num_cos_queues; /* number of queue to do CoS */
+ u8 default_cos; /* Default CoS value for non-IP or non-VLAN */
+ u8 reserved;
+ u32 pri_map; /* 32 bits, each nibble maps a cos_value(0~7)
+ * to a queue.
+ */
+};
+
+struct mv_pp2x_rss {
+ u8 rss_mode; /*UDP packet */
+ u8 dflt_cpu; /*non-IP packet */
+ u8 rss_en;
+};
+
+struct mv_pp2x_param_config {
+ u8 first_bm_pool;
+ u8 first_sw_thread; /* The index of the first PPv2.2
+ * sub-address space for this NET_INSTANCE.
+ */
+ u8 first_log_rxq; /* The first cos rx queue used in the port */
+ u8 cell_index; /* The cell_index of the PPv22
+ * (could be 0,1, set according to dtsi)
+ */
+ enum mv_pp2x_queue_distribution_mode queue_mode;
+ u32 rx_cpu_map; /* The CPU that port bind, each port has a nibble
+ * indexed by port_id, nibble value is CPU id
+ */
+ u8 uc_filter_max; /* The unicast filter list max, multiple of 4 */
+ u8 mc_filter_max; /* The multicast filter list max, multiple of 4 */
+};
+
+/* Shared Packet Processor resources */
+struct mv_pp2x {
+ enum mvppv2_version pp2_version; /* Redundant, consider to delete.
+ * (prevents extra pointer lookup from
+ * mv_pp2x_platform_data)
+ */
+ struct mv_pp2x_hw hw;
+ struct mv_pp2x_platform_data *pp2xdata;
+
+ u16 cpu_map; /* Bitmap of the participating cpu's */
+
+ struct mv_pp2x_param_config pp2_cfg;
+
+ /* List of pointers to port structures */
+ u16 num_ports;
+ struct mv_pp2x_port **port_list;
+
+ /* Aggregated TXQs */
+ u16 num_aggr_qs;
+ struct mv_pp2x_aggr_tx_queue *aggr_txqs;
+
+ /* BM pools */
+ u16 num_pools;
+ struct mv_pp2x_bm_pool *bm_pools;
+
+ /* RX flow hash indir'n table, in pp22, the table contains the
+ * CPU idx according to weight
+ */
+ u32 rx_indir_table[MVPP22_RSS_TBL_LINE_NUM];
+ u32 l4_chksum_jumbo_port;
+
+ struct delayed_work stats_task;
+ struct workqueue_struct *workqueue;
+};
+
+struct mv_pp2x_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+/* Per-CPU port control */
+struct mv_pp2x_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+ int ext_buf_size;
+ struct list_head ext_buf_port_list;
+ struct mv_pp2x_ext_buf_pool *ext_buf_pool;
+};
+
+struct queue_vector {
+ u32 irq;
+ char irq_name[IRQ_NAME_SIZE];
+ struct napi_struct napi;
+ enum mv_pp2x_queue_vector_type qv_type;
+ u16 sw_thread_id; /* address_space index used to
+ * retrieve interrupt_cause
+ */
+ u16 sw_thread_mask; /* Mask for Interrupt PORT_ENABLE Register */
+ u8 first_rx_queue; /* Relative to port */
+ u8 num_rx_queues;
+ u32 pending_cause_rx; /* mask in absolute port_queues, not relative as
+ * in Ethernet Occupied Interrupt Cause (EthOccIC))
+ */
+ struct mv_pp2x_port *parent;
+};
+
+struct mv_pp2x_ptp_desc; /* per-port private PTP descriptor */
+
+struct mv_pp2x_port {
+ u8 id;
+
+ u8 num_irqs;
+ u32 *of_irqs;
+
+ struct mv_pp2x *priv;
+
+ struct mv_mac_data mac_data;
+ struct tasklet_struct link_change_tasklet;
+
+ /* Per-port registers' base address */
+ void __iomem *base;
+
+ /* Index of port's first physical RXQ */
+ u8 first_rxq;
+
+ /* port's number of rx_queues */
+ u8 num_rx_queues;
+ /* port's number of tx_queues */
+ u8 num_tx_queues;
+
+ struct mv_pp2x_rx_queue **rxqs; /*Each Port has up tp 32 rxq_queues.*/
+ struct mv_pp2x_tx_queue **txqs;
+ struct net_device *dev;
+
+ int pkt_size; /* pkt_size determines which is pool_long:
+ * jumbo_pool or regular long_pool.
+ */
+
+ /* Per-CPU port control */
+ struct mv_pp2x_port_pcpu __percpu *pcpu;
+ /* Flags */
+ u64 flags;
+
+ u16 tx_ring_size;
+ u16 rx_ring_size;
+
+ u32 tx_time_coal;
+ struct mv_pp2x_pcpu_stats __percpu *stats;
+
+ struct mv_pp2x_bm_pool *pool_long; /* Pointer to the pool_id
+ * (long or jumbo)
+ */
+ struct mv_pp2x_bm_pool *pool_short; /* Pointer to the short pool_id */
+
+ struct phy *comphy; /* comphy handler */
+ int txq_stop_limit;
+
+ u32 num_qvector;
+ /* q_vector is the parameter that will be passed to
+ * mv_pp2_isr(int irq, void *dev_id=q_vector)
+ */
+ struct queue_vector q_vector[MVPP2_MAX_CPUS + MVPP2_MAX_SHARED];
+
+ struct mv_pp2x_ptp_desc *ptp_desc;
+ struct mv_pp2x_cos cos_cfg;
+ struct mv_pp2x_rss rss_cfg;
+};
+
+struct pp2x_hw_params {
+ u8 desc_queue_addr_shift;
+};
+
+struct mv_pp2x_platform_data {
+ enum mvppv2_version pp2x_ver;
+ u8 pp2x_max_port_rxqs;
+ u8 num_port_irq;
+ bool multi_addr_space;
+ bool interrupt_tx_done;
+ bool multi_hw_instance;
+ void (*mv_pp2x_rxq_short_pool_set)(struct mv_pp2x_hw *, int, int);
+ void (*mv_pp2x_rxq_long_pool_set)(struct mv_pp2x_hw *, int, int);
+ void (*mv_pp2x_port_queue_vectors_init)(struct mv_pp2x_port *);
+ void (*mv_pp2x_port_isr_rx_group_cfg)(struct mv_pp2x_port *);
+ struct pp2x_hw_params hw;
+};
+
+struct mv_pp2x_ext_buf_struct {
+ struct list_head ext_buf_list;
+ u8 *ext_buf_data;
+};
+
+struct mv_pp2x_ext_buf_pool {
+ int buf_pool_size;
+ int buf_pool_next_free;
+ int buf_pool_in_use;
+ struct mv_pp2x_ext_buf_struct *ext_buf_struct;
+};
+
+/* Convert a duration in microseconds into clock cycles at @clock_rate_hz,
+ * saturating the result at U32_MAX.
+ * Fix: the product must be formed in 64-bit arithmetic.  The original
+ * "clock_rate_hz * usec" multiplied (unsigned long * u32) and only then
+ * widened to u64, which overflows on 32-bit builds (e.g. 250 MHz * 1000 us).
+ */
+static inline u32 usec_to_cycles(u32 usec, unsigned long clock_rate_hz)
+{
+	u64 tmp = (u64)clock_rate_hz * usec;
+
+	do_div(tmp, USEC_PER_SEC);
+
+	return tmp > 0xffffffff ? 0xffffffff : tmp;
+}
+
+/* Convert @cycles clock cycles at @clock_rate_hz into microseconds,
+ * saturating the result at U32_MAX.
+ * Fix: the product must be formed in 64-bit arithmetic.  The original
+ * "cycles * USEC_PER_SEC" multiplied (u32 * long) before widening, which
+ * overflows 32-bit arithmetic for cycles > ~4294 on 32-bit builds.
+ * NOTE(review): do_div() takes a 32-bit divisor, so clock rates above
+ * 4 GHz would truncate -- acceptable for this hardware, but worth noting.
+ */
+static inline u32 cycles_to_usec(u32 cycles, unsigned long clock_rate_hz)
+{
+	u64 tmp = (u64)cycles * USEC_PER_SEC;
+
+	do_div(tmp, clock_rate_hz);
+
+	return tmp > 0xffffffff ? 0xffffffff : tmp;
+}
+
+/* Look up the port structure whose physical port id equals @port.
+ * Returns NULL if no registered port matches.
+ */
+static inline struct mv_pp2x_port *mv_pp2x_port_struct_get(struct mv_pp2x *priv,
+							   int port)
+{
+	int idx;
+
+	for (idx = 0; idx < priv->num_ports; idx++) {
+		struct mv_pp2x_port *pp = priv->port_list[idx];
+
+		if (pp->id == port)
+			return pp;
+	}
+
+	return NULL;
+}
+
+/* Look up the port structure whose GOP (group-of-ports) index equals
+ * @gop_index.  Returns NULL if no registered port matches.
+ */
+static inline
+struct mv_pp2x_port *mv_pp2x_port_struct_get_by_gop_index(struct mv_pp2x *priv,
+							  int gop_index)
+{
+	int idx;
+
+	for (idx = 0; idx < priv->num_ports; idx++) {
+		struct mv_pp2x_port *pp = priv->port_list[idx];
+
+		if (pp->mac_data.gop_index == gop_index)
+			return pp;
+	}
+
+	return NULL;
+}
+
+/* Map a CoS value to its queue number.  pri_map packs one queue number
+ * per 4-bit nibble, indexed by @cos_value; the extracted nibble is masked
+ * down to the configured number of CoS queues (rounded up to a power of
+ * two).
+ */
+static inline u8 mv_pp2x_cosval_queue_map(struct mv_pp2x_port *port,
+					  u8 cos_value)
+{
+	int width, mask;
+
+	width = ilog2(roundup_pow_of_two(port->cos_cfg.num_cos_queues));
+	mask = (1 << width) - 1;
+
+	return (port->cos_cfg.pri_map >> (cos_value * 4)) & mask;
+}
+
+/* Compute the first physical RXQ owned by the CPU this port is bound to.
+ * rx_cpu_map holds one CPU id per 4-bit nibble, indexed by port id; each
+ * bound CPU then owns a contiguous group of 2^cos_width RX queues above
+ * the port's first_rxq.
+ */
+static inline u8 mv_pp2x_bound_cpu_first_rxq_calc(struct mv_pp2x_port *port)
+{
+	u8 width, cpu;
+
+	width = ilog2(roundup_pow_of_two(port->cos_cfg.num_cos_queues));
+	cpu = (port->priv->pp2_cfg.rx_cpu_map >> (4 * port->id)) & 0xF;
+
+	return port->first_rxq + (cpu << width);
+}
+
+/* Swap RX descriptor to be BE */
+/* In-place endianness conversion of every field of a PPv2.1 RX descriptor;
+ * a no-op on LE hosts, a byte-swap of each field on BE hosts.
+ * NOTE(review): cpu_to_le*s() is applied to descriptors coming FROM the
+ * hardware; the operation is identical to le*_to_cpus(), so this is
+ * correct, but the latter would express the direction better.
+ */
+static inline void mv_pp21_rx_desc_swap(struct mv_pp2x_rx_desc *rx_desc)
+{
+	cpu_to_le32s(&rx_desc->status);
+	cpu_to_le16s(&rx_desc->rsrvd_parser);
+	cpu_to_le16s(&rx_desc->data_size);
+	cpu_to_le32s(&rx_desc->u.pp21.buf_phys_addr);
+	cpu_to_le32s(&rx_desc->u.pp21.buf_cookie);
+	cpu_to_le16s(&rx_desc->u.pp21.rsrvd_gem);
+	cpu_to_le16s(&rx_desc->u.pp21.rsrvd_l4csum);
+	cpu_to_le16s(&rx_desc->u.pp21.rsrvd_cls_info);
+	cpu_to_le32s(&rx_desc->u.pp21.rsrvd_flow_id);
+	cpu_to_le32s(&rx_desc->u.pp21.rsrvd_abs);
+}
+
+/* In-place endianness conversion of a PPv2.2 RX descriptor (64-bit
+ * combined address/cookie fields); no-op on LE hosts.
+ */
+static inline void mv_pp22_rx_desc_swap(struct mv_pp2x_rx_desc *rx_desc)
+{
+	cpu_to_le32s(&rx_desc->status);
+	cpu_to_le16s(&rx_desc->rsrvd_parser);
+	cpu_to_le16s(&rx_desc->data_size);
+	cpu_to_le16s(&rx_desc->u.pp22.rsrvd_gem);
+	cpu_to_le16s(&rx_desc->u.pp22.rsrvd_l4csum);
+	cpu_to_le32s(&rx_desc->u.pp22.rsrvd_timestamp);
+	cpu_to_le64s(&rx_desc->u.pp22.buf_phys_addr_key_hash);
+	cpu_to_le64s(&rx_desc->u.pp22.buf_cookie_bm_qset_cls_info);
+}
+
+/* Swap TX descriptor to be BE */
+/* In-place endianness conversion of a PPv2.1 TX descriptor before it is
+ * handed to the (little-endian) hardware; no-op on LE hosts.
+ */
+static inline void mv_pp21_tx_desc_swap(struct mv_pp2x_tx_desc *tx_desc)
+{
+	cpu_to_le32s(&tx_desc->command);
+	cpu_to_le16s(&tx_desc->data_size);
+	cpu_to_le32s(&tx_desc->u.pp21.buf_phys_addr);
+	cpu_to_le32s(&tx_desc->u.pp21.buf_cookie);
+	cpu_to_le32s(&tx_desc->u.pp21.rsrvd_hw_cmd[0]);
+	cpu_to_le32s(&tx_desc->u.pp21.rsrvd_hw_cmd[1]);
+	cpu_to_le32s(&tx_desc->u.pp21.rsrvd_hw_cmd[2]);
+	cpu_to_le32s(&tx_desc->u.pp21.rsrvd1);
+}
+
+/* In-place endianness conversion of a PPv2.2 TX descriptor (64-bit
+ * combined command/address fields); no-op on LE hosts.
+ */
+static inline void mv_pp22_tx_desc_swap(struct mv_pp2x_tx_desc *tx_desc)
+{
+	cpu_to_le32s(&tx_desc->command);
+	cpu_to_le16s(&tx_desc->data_size);
+	cpu_to_le64s(&tx_desc->u.pp22.rsrvd_hw_cmd1);
+	cpu_to_le64s(&tx_desc->u.pp22.buf_phys_addr_hw_cmd2);
+	cpu_to_le64s(&tx_desc->u.pp22.buf_cookie_bm_qset_hw_cmd3);
+}
+
+struct mv_pp2x_pool_attributes {
+ char description[32];
+ int pkt_size;
+ int buf_num;
+};
+
+char *mv_pp2x_pool_description_get(enum mv_pp2x_bm_pool_log_num log_id);
+
+void mv_pp2x_bm_bufs_free(struct device *dev, struct mv_pp2x *priv,
+ struct mv_pp2x_bm_pool *bm_pool, int buf_num);
+int mv_pp2x_bm_bufs_add(struct mv_pp2x_port *port,
+ struct mv_pp2x_bm_pool *bm_pool, int buf_num);
+int mv_pp2x_bm_pool_ext_add(struct device *dev, struct mv_pp2x *priv,
+ u32 *pool_num, u32 pkt_size);
+int mv_pp2x_bm_pool_destroy(struct device *dev, struct mv_pp2x *priv,
+ struct mv_pp2x_bm_pool *bm_pool);
+int mv_pp2x_swf_bm_pool_assign(struct mv_pp2x_port *port, u32 rxq,
+ u32 long_id, u32 short_id);
+int mv_pp2x_open(struct net_device *dev);
+int mv_pp2x_stop(struct net_device *dev);
+void mv_pp2x_txq_inc_put(enum mvppv2_version pp2_ver,
+ struct mv_pp2x_txq_pcpu *txq_pcpu,
+ struct sk_buff *skb,
+ struct mv_pp2x_tx_desc *tx_desc);
+int mv_pp2x_check_ringparam_valid(struct net_device *dev,
+ struct ethtool_ringparam *ring);
+void mv_pp2x_start_dev(struct mv_pp2x_port *port);
+void mv_pp2x_stop_dev(struct mv_pp2x_port *port);
+void mv_pp2x_cleanup_rxqs(struct mv_pp2x_port *port);
+int mv_pp2x_setup_rxqs(struct mv_pp2x_port *port);
+int mv_pp2x_setup_txqs(struct mv_pp2x_port *port);
+void mv_pp2x_cleanup_txqs(struct mv_pp2x_port *port);
+void mv_pp2x_set_ethtool_ops(struct net_device *netdev);
+int mv_pp22_rss_rxfh_indir_set(struct mv_pp2x_port *port);
+int mv_pp2x_cos_classifier_set(struct mv_pp2x_port *port,
+ enum mv_pp2x_cos_classifier cos_mode);
+int mv_pp2x_cos_classifier_get(struct mv_pp2x_port *port);
+int mv_pp2x_cos_pri_map_set(struct mv_pp2x_port *port, int cos_pri_map);
+int mv_pp2x_cos_pri_map_get(struct mv_pp2x_port *port);
+int mv_pp2x_cos_default_value_set(struct mv_pp2x_port *port, int cos_value);
+int mv_pp2x_cos_default_value_get(struct mv_pp2x_port *port);
+int mv_pp22_rss_mode_set(struct mv_pp2x_port *port, int rss_mode);
+int mv_pp22_rss_default_cpu_set(struct mv_pp2x_port *port, int default_cpu);
+int mv_pp2x_txq_reserved_desc_num_proc(struct mv_pp2x *priv,
+ struct mv_pp2x_tx_queue *txq,
+ struct mv_pp2x_txq_pcpu *txq_pcpu,
+ int num, int cpu);
+
+#endif /*_MVPP2_H_*/
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.c b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.c
new file mode 100644
index 000000000000..d3f2344d5d75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.c
@@ -0,0 +1,98 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "mv_pp2x.h"
+#include "mv_pp2x_hw.h"
+#include "mv_pp2x_debug.h"
+
+/* Set the driver-global debug verbosity knob (extern "debug_param",
+ * declared in mv_pp2x.h).  Always returns 0; exported so companion
+ * modules can tune verbosity at run time.
+ */
+int mv_pp2x_debug_param_set(u32 param)
+{
+	debug_param = param;
+	return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_debug_param_set);
+
+/* Read back the driver-global debug verbosity knob. */
+int mv_pp2x_debug_param_get(void)
+{
+	return debug_param;
+}
+EXPORT_SYMBOL(mv_pp2x_debug_param_get);
+
+/* Extra debug */
+/* Hex-dump the start of @skb's data buffer to the kernel log.
+ * @size:   number of bytes to dump (rounded up to a multiple of 4)
+ * @access: element width of each printed value: 1, 2 or 4 bytes
+ *          (0 is treated as 1; any other value is rejected with an error)
+ * Output is 32 bytes per line, each line prefixed with the 4-byte-aligned
+ * memory address; columns before the access-aligned start are blank-padded.
+ * Debug-only helper -- not intended for production paths.
+ */
+void mv_pp2x_skb_dump(struct sk_buff *skb, int size, int access)
+{
+	int i, j;
+	void *addr = skb->head + NET_SKB_PAD;
+	uintptr_t mem_addr = (uintptr_t)addr;
+
+	DBG_MSG("skb=%p, buf=%p, ksize=%d\n", skb, skb->head,
+		(int)ksize(skb->head));
+
+	if (access == 0)
+		access = 1;
+
+	if ((access != 4) && (access != 2) && (access != 1)) {
+		pr_err("%d wrong access size. Access must be 1 or 2 or 4\n",
+			access);
+		return;
+	}
+	/* Align the dump window: line addresses down to 4 bytes, size up to
+	 * a multiple of 4, and the first printable address down to the
+	 * access width.
+	 */
+	mem_addr = round_down((uintptr_t)addr, 4);
+	size = round_up(size, 4);
+	addr = (void *)round_down((uintptr_t)addr, access);
+
+	while (size > 0) {
+		/* NOTE(review): %08lx assumes uintptr_t == unsigned long,
+		 * which holds on Linux but is not guaranteed by C.
+		 */
+		DBG_MSG("%08lx: ", mem_addr);
+		i = 0;
+		/* 32 bytes in the line */
+		while (i < 32) {
+			if (mem_addr >= (uintptr_t)addr) {
+				/* NOTE(review): ioread*() on ordinary kernel
+				 * memory (skb data) is unusual; a plain
+				 * dereference would normally be used --
+				 * confirm this is intentional.
+				 */
+				switch (access) {
+				case 1:
+					DBG_MSG("%02x ",
+						ioread8((void *)mem_addr));
+					break;
+
+				case 2:
+					DBG_MSG("%04x ",
+						ioread16((void *)mem_addr));
+					break;
+
+				case 4:
+					DBG_MSG("%08x ",
+						ioread32((void *)mem_addr));
+					break;
+				}
+			} else {
+				/* Blank-pad columns before the aligned start
+				 * (two hex digits per byte plus a space).
+				 */
+				for (j = 0; j < (access * 2 + 1); j++)
+					DBG_MSG(" ");
+			}
+			i += access;
+			mem_addr += access;
+			size -= access;
+			if (size <= 0)
+				break;
+		}
+		DBG_MSG("\n");
+	}
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.h b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.h
new file mode 100644
index 000000000000..8717b0180242
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_debug.h
@@ -0,0 +1,33 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#ifndef _MVPP2_DEBUG_H_
+#define _MVPP2_DEBUG_H_
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+#define DBG_MSG(fmt, args...) printk(fmt, ## args)
+
+void mv_pp2x_skb_dump(struct sk_buff *skb, int size, int access);
+
+int mv_pp2x_debug_param_set(u32 param);
+
+int mv_pp2x_debug_param_get(void);
+
+#endif /* _MVPP2_DEBUG_H_ */
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c
new file mode 100644
index 000000000000..d5cc5f5e6bee
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_ethtool.c
@@ -0,0 +1,1245 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/phy/phy.h>
+#include <dt-bindings/phy/phy-mvebu-comphy.h>
+
+#include "mv_pp2x.h"
+#include "mv_pp2x_hw.h"
+#include "mv_gop110_hw.h"
+
+#define MV_PP2_STATS_LEN ARRAY_SIZE(mv_pp2x_gstrings_stats)
+#define MV_PP2_TEST_LEN ARRAY_SIZE(mv_pp2x_gstrings_test)
+/* Number of u32 registers dumped by ethtool -d for each MAC flavour */
+#define MV_PP2_REGS_GMAC_LEN 54
+#define MV_PP2_REGS_XLG_LEN 25
+/* Mask/pattern pairs used by the register self-test; MASK3/PATTERN3
+ * (all zero) marks read-only registers that must not be written.
+ */
+#define MV_PP2_TEST_MASK1 0xFFFF
+#define MV_PP2_TEST_MASK2 0x00FE
+#define MV_PP2_TEST_MASK3 0x0
+#define MV_PP2_TEST_PATTERN1 0xFFFF
+#define MV_PP2_TEST_PATTERN2 0x00FE
+#define MV_PP2_TEST_PATTERN3 0x0
+
+/* Names reported for ETH_SS_TEST (ethtool self-test) */
+static const char mv_pp2x_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Link test (on/offline)",
+ "register test (on/offline)",
+};
+
+/* Names reported for ETH_SS_STATS; order must match the data[] fill
+ * in mv_pp2x_eth_tool_get_ethtool_stats()
+ */
+static const char mv_pp2x_gstrings_stats[][ETH_GSTRING_LEN] = {
+ /* device-specific stats */
+ "rx_bytes", "rx_frames", "rx_unicast", "rx_mcast", "rx_bcast",
+ "tx_bytes", "tx_frames", "tx_unicast", "tx_mcast", "tx_bcast",
+ "rx_pause", "tx_pause", "rx_overrun", "rx_crc", "rx_runt", "rx_giant",
+ "rx_fragments_err", "rx_mac_err", "rx_jabber", "rx_sw_drop", "rx_total_err",
+ "tx_drop", "tx_crc_sent", "collision", "late_collision",
+};
+
+/* Translate the ethtool speed/duplex request in @kset into the driver's
+ * MV_PORT_* representation in @pstatus.
+ * Only 10/100/1000 Mbps are accepted; 1G requires full duplex.
+ * Returns 0 on success, -1 on an unsupported combination.
+ */
+int mv_pp2x_check_speed_duplex_valid(const struct ethtool_link_ksettings *kset,
+ struct mv_port_link_status *pstatus)
+{
+ switch (kset->base.duplex) {
+ case DUPLEX_FULL:
+ pstatus->duplex = MV_PORT_DUPLEX_FULL;
+ break;
+ case DUPLEX_HALF:
+ pstatus->duplex = MV_PORT_DUPLEX_HALF;
+ break;
+ case DUPLEX_UNKNOWN:
+ /* Both branches default to full duplex; only the non-1G
+ * case warns the user about the defaulting.
+ */
+ if (kset->base.speed == SPEED_1000) {
+ pstatus->duplex = MV_PORT_DUPLEX_FULL;
+ } else {
+ pstatus->duplex = MV_PORT_DUPLEX_FULL;
+ pr_err("Unknown duplex configuration, full duplex set\n");
+ }
+ break;
+ default:
+ pr_err("Wrong duplex configuration\n");
+ return -1;
+ }
+
+ switch (kset->base.speed) {
+ case SPEED_100:
+ pstatus->speed = MV_PORT_SPEED_100;
+ return 0;
+ case SPEED_10:
+ pstatus->speed = MV_PORT_SPEED_10;
+ return 0;
+ case SPEED_1000:
+ pstatus->speed = MV_PORT_SPEED_1000;
+ /* DUPLEX_FULL == 1, so non-zero duplex means full duplex */
+ if (kset->base.duplex)
+ return 0;
+ pr_err("1G port doesn't support half duplex\n");
+ return -1;
+ default:
+ pr_err("Wrong speed configuration\n");
+ return -1;
+ }
+}
+
+/* Validate an ethtool link request against a GMAC port.
+ * Ports configured as 1000Base-X (port-type check fails) cannot
+ * autonegotiate; for forced mode the requested speed/duplex is
+ * translated into @pstatus for the caller to apply.
+ * Returns 0 if the request is acceptable, -EINVAL otherwise.
+ */
+int mv_pp2x_autoneg_gmac_check_valid(struct mv_mac_data *mac, struct gop_hw *gop,
+ const struct ethtool_link_ksettings *kset, struct mv_port_link_status *pstatus)
+{
+ int port_num = mac->gop_index;
+ int err;
+
+ err = mv_gop110_check_port_type(gop, port_num);
+ if (err) {
+ /* 1000Base-X: only a forced configuration is allowed */
+ if (kset->base.autoneg) {
+ pr_err("GOP %d set to 1000Base-X and doesn't support autonegotiation\n", port_num);
+ return -EINVAL;
+ }
+ return 0;
+ }
+ if (!kset->base.autoneg) {
+ /* Forced mode: speed/duplex must be a supported combination */
+ err = mv_pp2x_check_speed_duplex_valid(kset, pstatus);
+ if (err)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* XLG (10G) MACs never autonegotiate: reject any request with
+ * autoneg enabled. Returns 0 when autoneg is off, -EINVAL otherwise.
+ */
+int mv_pp2x_autoneg_xlg_check_valid(struct mv_mac_data *mac, const struct ethtool_link_ksettings *kset)
+{
+ int port_num = mac->gop_index;
+
+ if (kset->base.autoneg) {
+ pr_err("XLG GOP %d doesn't support autonegotiation\n", port_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Clamp out-of-range coalescing parameters in @c to the hardware
+ * maxima, warning the user; the (possibly adjusted) values are then
+ * applied by the caller. Time values are converted from usecs to tclk
+ * cycles for the range check.
+ */
+void mv_pp2x_ethtool_valid_coalesce(struct ethtool_coalesce *c,
+ struct mv_pp2x_port *port)
+{
+ u64 val;
+
+ if (c->rx_max_coalesced_frames > MVPP2_MAX_OCCUPIED_THRESH) {
+ pr_err("RX coalesced frames value too high, rounded to %d\n",
+ MVPP2_MAX_OCCUPIED_THRESH);
+ /* actually clamp the value, as the message promises */
+ c->rx_max_coalesced_frames = MVPP2_MAX_OCCUPIED_THRESH;
+ }
+
+ if (c->tx_max_coalesced_frames > MVPP2_MAX_TRANSMITTED_THRESH) {
+ pr_err("TX coalesced frames value too high, rounded to %d\n",
+ MVPP2_MAX_TRANSMITTED_THRESH);
+ c->tx_max_coalesced_frames = MVPP2_MAX_TRANSMITTED_THRESH;
+ }
+
+ val = usec_to_cycles(c->rx_coalesce_usecs, port->priv->hw.tclk);
+ if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+ /* round with the RX threshold, not the TX one (was a
+ * copy-paste of the TX branch below)
+ */
+ c->rx_coalesce_usecs =
+ cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD,
+ port->priv->hw.tclk);
+ pr_err("RX coalesced time value too high, rounded to %d usecs\n",
+ c->rx_coalesce_usecs);
+ }
+
+ val = usec_to_cycles(c->tx_coalesce_usecs, port->priv->hw.tclk);
+ if (val > MVPP22_MAX_ISR_TX_THRESHOLD) {
+ c->tx_coalesce_usecs =
+ cycles_to_usec(MVPP22_MAX_ISR_TX_THRESHOLD,
+ port->priv->hw.tclk);
+ pr_err("TX coalesced time value too high, rounded to %d usecs\n",
+ c->tx_coalesce_usecs);
+ }
+}
+
+/* Ethtool methods */
+
+/* Ethtool statistic: fill @data with the GOP MIB counters plus a few
+ * software counters from dev->stats. The fill order must match
+ * mv_pp2x_gstrings_stats exactly. Not supported on PPv2.1 hardware
+ * (data is left untouched in that case).
+ */
+static void mv_pp2x_eth_tool_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct mv_mac_data *mac = &port->mac_data;
+ struct gop_hw *gop = &port->priv->hw.gop;
+ int gop_port = mac->gop_index;
+ struct gop_stat *gop_statistics = &mac->gop_statistics;
+ int i = 0;
+
+ if (port->priv->pp2_version == PPV21)
+ return;
+
+ /* refresh the cached MIB counters from hardware */
+ mv_gop110_mib_counters_stat_update(gop, gop_port, gop_statistics);
+
+ data[i++] = gop_statistics->rx_byte;
+ data[i++] = gop_statistics->rx_frames;
+ data[i++] = gop_statistics->rx_unicast;
+ data[i++] = gop_statistics->rx_mcast;
+ data[i++] = gop_statistics->rx_bcast;
+ data[i++] = gop_statistics->tx_byte;
+ data[i++] = gop_statistics->tx_frames;
+ data[i++] = gop_statistics->tx_unicast;
+ data[i++] = gop_statistics->tx_mcast;
+ data[i++] = gop_statistics->tx_bcast;
+ data[i++] = gop_statistics->rx_pause;
+ data[i++] = gop_statistics->tx_pause;
+ data[i++] = gop_statistics->rx_overrun;
+ data[i++] = gop_statistics->rx_crc;
+ data[i++] = gop_statistics->rx_runt;
+ data[i++] = gop_statistics->rx_giant;
+ data[i++] = gop_statistics->rx_fragments_err;
+ data[i++] = gop_statistics->rx_mac_err;
+ data[i++] = gop_statistics->rx_jabber;
+ /* "rx_sw_drop" and "rx_total_err" mix in software drop counts */
+ data[i++] = dev->stats.rx_dropped;
+ data[i++] = gop_statistics->rx_total_err + dev->stats.rx_dropped;
+ data[i++] = dev->stats.tx_dropped;
+ data[i++] = gop_statistics->tx_crc_sent;
+ data[i++] = gop_statistics->collision;
+ data[i++] = gop_statistics->late_collision;
+}
+
+/* Report the string tables for ethtool self-test and statistics;
+ * unknown string sets are silently ignored.
+ */
+static void mv_pp2x_eth_tool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *mv_pp2x_gstrings_test, sizeof(mv_pp2x_gstrings_test));
+ break;
+ case ETH_SS_STATS:
+ memcpy(data, *mv_pp2x_gstrings_stats, sizeof(mv_pp2x_gstrings_stats));
+ break;
+ default:
+ break;
+ }
+}
+
+/* Number of entries in each ethtool string set; pairs with
+ * mv_pp2x_eth_tool_get_strings() above.
+ */
+static int mv_pp2x_eth_tool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return MV_PP2_TEST_LEN;
+ case ETH_SS_STATS:
+ return MV_PP2_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Restart autonegotiation (ethtool -r). Only meaningful for GMAC
+ * modes with in-band autoneg; 1000Base-X and XLG (10G) ports reject
+ * the request. Not supported on PPv2.1 or uninitialized interfaces.
+ */
+int mv_pp2x_eth_tool_nway_reset(struct net_device *dev)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct gop_hw *gop = &port->priv->hw.gop;
+ struct mv_mac_data *mac = &port->mac_data;
+ int err;
+
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ if (!(mac->flags & MV_EMAC_F_INIT)) {
+ pr_err("%s: interface %s is not initialized\n", __func__, dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* 1000Base-X configuration has no autoneg to restart */
+ err = mv_gop110_check_port_type(gop, mac->gop_index);
+ if (err) {
+ pr_err("GOP %d set to 1000Base-X\n", mac->gop_index);
+ return -EINVAL;
+ }
+ mv_gop110_autoneg_restart(gop, mac);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ pr_err("XLG GOP %d doesn't support autonegotiation\n", mac->gop_index);
+ return -ENODEV;
+ default:
+ pr_err("%s: Wrong port mode (%d)\n", __func__, mac->phy_mode);
+ /* return a real errno, not -1 (-EPERM) */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Get pause fc settings for ethtools.
+ * Reads the negotiated/forced flow-control state from the GOP link
+ * status; FCA (flow control automation) TX state overrides the MAC's
+ * own tx_fc indication. No-op on PPv2.1.
+ */
+static void mv_pp2x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct mv_port_link_status status;
+ struct mv_mac_data *mac = &port->mac_data;
+ struct gop_hw *gop = &port->priv->hw.gop;
+ int gop_port = mac->gop_index;
+ phy_interface_t phy_mode;
+
+ if (port->priv->pp2_version == PPV21)
+ return;
+
+ phy_mode = port->mac_data.phy_mode;
+
+ /* 'status' is only read below if one of these cases filled it;
+ * the default case returns early.
+ */
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ mv_gop110_port_link_status(gop, mac, &status);
+ pause->autoneg =
+ (status.autoneg_fc ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ /* XLG has no flow-control autonegotiation */
+ mv_gop110_port_link_status(gop, mac, &status);
+ pause->autoneg = AUTONEG_DISABLE;
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, phy_mode);
+ return;
+ }
+
+ if (status.rx_fc == MV_PORT_FC_ACTIVE || status.rx_fc == MV_PORT_FC_ENABLE)
+ pause->rx_pause = 1;
+
+ /* FCA-generated pause frames count as TX pause being on */
+ if (mv_gop110_check_fca_tx_state(gop, gop_port)) {
+ pause->tx_pause = 1;
+ return;
+ }
+
+ if (status.tx_fc == MV_PORT_FC_ACTIVE || status.tx_fc == MV_PORT_FC_ENABLE)
+ pause->tx_pause = 1;
+}
+
+/* Set pause fc settings for ethtools.
+ * GMAC modes: force the link down, program autoneg/RX/TX flow control
+ * plus the FCA engine, then release the forced link. XLG modes: only
+ * forced RX/TX control is supported (no fc autoneg).
+ * Returns 0 on success or a negative errno.
+ */
+static int mv_pp2x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct mv_mac_data *mac = &port->mac_data;
+ struct gop_hw *gop = &port->priv->hw.gop;
+ int gop_port = mac->gop_index;
+ phy_interface_t phy_mode;
+ int err;
+
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ if (!(mac->flags & MV_EMAC_F_INIT)) {
+ pr_err("%s: interface %s is not initialized\n", __func__, dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ phy_mode = port->mac_data.phy_mode;
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* 2.5G SGMII (1000Base-X type) cannot do flow control */
+ if (mac->speed == SPEED_2500) {
+ err = mv_gop110_check_port_type(gop, gop_port);
+ if (err) {
+ pr_err("Peridot module doesn't support FC\n");
+ return -EINVAL;
+ }
+ }
+
+ /* hold the link in forced mode while reprogramming FC */
+ mv_gop110_force_link_mode_set(gop, mac, false, true);
+
+ if (pause->autoneg) {
+ mv_gop110_gmac_fc_set(gop, gop_port, MV_PORT_FC_AN_SYM);
+ mv_gop110_autoneg_restart(gop, mac);
+ mv_gop110_fca_send_periodic(gop, gop_port, false);
+ }
+ else {
+ mv_gop110_gmac_fc_set(gop, gop_port, MV_PORT_FC_AN_NO);
+ mv_gop110_fca_send_periodic(gop, gop_port, true);
+ }
+
+ if (pause->rx_pause)
+ mv_gop110_gmac_fc_set(gop, gop_port, MV_PORT_FC_RX_ENABLE);
+ else
+ mv_gop110_gmac_fc_set(gop, gop_port, MV_PORT_FC_RX_DISABLE);
+
+ /* TX pause is produced either by the MAC or by FCA, never
+ * both: enabling one disables the other.
+ */
+ if (pause->tx_pause) {
+ mv_gop110_gmac_fc_set(gop, gop_port, MV_PORT_FC_TX_ENABLE);
+ mv_gop110_fca_tx_enable(gop, gop_port, false);
+ }
+ else {
+ mv_gop110_gmac_fc_set(gop, gop_port, MV_PORT_FC_TX_DISABLE);
+ mv_gop110_fca_tx_enable(gop, gop_port, true);
+ }
+
+ mv_gop110_force_link_mode_set(gop, mac, false, false);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ if (pause->autoneg) {
+ pr_err("10G port doesn't support fc autoneg\n");
+ return -EINVAL;
+ }
+ if (pause->rx_pause)
+ mv_gop110_xlg_mac_fc_set(gop, gop_port, MV_PORT_FC_RX_ENABLE);
+ else
+ mv_gop110_xlg_mac_fc_set(gop, gop_port, MV_PORT_FC_RX_DISABLE);
+
+ /* NOTE(review): unlike the GMAC path, the tx_pause branch
+ * never calls xlg_mac_fc_set(TX_ENABLE) — presumably FCA
+ * alone generates TX pause here; confirm this asymmetry is
+ * intentional.
+ */
+ if (pause->tx_pause) {
+ mv_gop110_fca_tx_enable(gop, gop_port, false);
+ } else {
+ mv_gop110_xlg_mac_fc_set(gop, gop_port, MV_PORT_FC_TX_DISABLE);
+ mv_gop110_fca_tx_enable(gop, gop_port, true);
+ }
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, phy_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Get settings (phy address, speed) for ethtools.
+ * With an attached PHY the query is delegated to phylib; otherwise the
+ * link state is read directly from the GOP and the supported/advertised
+ * masks are synthesized from the port's phy_mode.
+ */
+static int mv_pp2x_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *kset)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct mv_port_link_status status;
+ phy_interface_t phy_mode;
+
+ if (port->priv->pp2_version == PPV21) {
+ if (!port->mac_data.phy_dev)
+ return -ENODEV;
+ return phy_ethtool_get_link_ksettings(dev, kset);
+ }
+
+ /* No Phy device mngmt */
+ if (!port->mac_data.phy_dev) {
+ /*for force link port, RXAUI port and link-down ports,
+ * follow old strategy
+ */
+
+ mv_gop110_port_link_status(&port->priv->hw.gop,
+ &port->mac_data, &status);
+
+ if (status.linkup) {
+ switch (status.speed) {
+ case MV_PORT_SPEED_10000:
+ kset->base.speed = SPEED_10000;
+ break;
+ case MV_PORT_SPEED_1000:
+ kset->base.speed = SPEED_1000;
+ break;
+ case MV_PORT_SPEED_100:
+ kset->base.speed = SPEED_100;
+ break;
+ case MV_PORT_SPEED_10:
+ kset->base.speed = SPEED_10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (status.duplex == MV_PORT_DUPLEX_FULL)
+ kset->base.duplex = DUPLEX_FULL;
+ else
+ kset->base.duplex = DUPLEX_HALF;
+ } else {
+ kset->base.speed = SPEED_UNKNOWN;
+ /* was SPEED_UNKNOWN (-1 in a u8 field); the duplex
+ * field takes DUPLEX_* constants
+ */
+ kset->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ phy_mode = port->mac_data.phy_mode;
+ ethtool_link_ksettings_zero_link_mode(kset, supported);
+ ethtool_link_ksettings_zero_link_mode(kset, advertising);
+ if ((phy_mode == PHY_INTERFACE_MODE_XAUI) ||
+ (phy_mode == PHY_INTERFACE_MODE_RXAUI) ||
+ (phy_mode == PHY_INTERFACE_MODE_10GKR)) {
+ /* XLG: fixed 10G fibre, no autoneg */
+ kset->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ FIBRE);
+ kset->base.port = PORT_FIBRE;
+ } else {
+ /* GMAC: advertise the full 10/100/1000 matrix */
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ TP);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ MII);
+ ethtool_link_ksettings_add_link_mode(kset, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ TP);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ MII);
+ ethtool_link_ksettings_add_link_mode(kset, advertising,
+ 1000baseT_Full);
+ kset->base.port = PORT_MII;
+
+ /* check if speed and duplex are AN */
+ if (mv_gop110_port_autoneg_status(&port->priv->hw.gop,
+ &port->mac_data)) {
+ kset->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ kset->base.autoneg = AUTONEG_DISABLE;
+ }
+ }
+
+ return 0;
+ }
+
+ return phy_ethtool_get_link_ksettings(dev, kset);
+}
+
+/* Apply a validated ethtool link request to a GMAC port.
+ * The link is held in forced mode while reconfiguring; in forced
+ * (non-autoneg) mode @status carries the speed/duplex previously
+ * filled by mv_pp2x_check_speed_duplex_valid().
+ */
+void mv_pp2x_ethtool_set_gmac_config(struct mv_port_link_status status, struct gop_hw *gop,
+ int gop_port, struct mv_mac_data *mac,
+ const struct ethtool_link_ksettings *kset)
+{
+ mv_gop110_force_link_mode_set(gop, mac, false, true);
+ mv_gop110_gmac_set_autoneg(gop, mac, kset->base.autoneg);
+ if (kset->base.autoneg)
+ mv_gop110_autoneg_restart(gop, mac);
+ else
+ mv_gop110_gmac_speed_duplex_set(gop, gop_port, status.speed, status.duplex);
+ mv_gop110_force_link_mode_set(gop, mac, false, false);
+}
+
+/* Map a requested ethtool speed to the COMPHY serdes mode for
+ * @port_id. 10G (SFI) is only available on port 0; 2.5G selects
+ * high-speed SGMII; 10/100/1000 share plain SGMII.
+ * Returns a COMPHY_DEF() mode or -EINVAL for unsupported speeds.
+ */
+int mv_pp2x_get_new_comphy_mode(const struct ethtool_link_ksettings *kset, int port_id)
+{
+ if (kset->base.speed == SPEED_10000 && port_id == 0)
+ return COMPHY_DEF(COMPHY_SFI_MODE, port_id);
+ else if (kset->base.speed == SPEED_2500)
+ return COMPHY_DEF(COMPHY_HS_SGMII_MODE, port_id);
+ else if (kset->base.speed == SPEED_1000 || kset->base.speed == SPEED_100 ||
+ kset->base.speed == SPEED_10)
+ return COMPHY_DEF(COMPHY_SGMII_MODE, port_id);
+ else
+ return -EINVAL;
+}
+
+/* Update the MAC's phy_mode/speed bookkeeping to match a new speed
+ * chosen via ethtool: 10G switches to 10GKR, 2.5G to SGMII with the
+ * SGMII2_5 flag, anything else to plain 1G SGMII.
+ */
+void mv_pp2x_set_new_phy_mode(const struct ethtool_link_ksettings *kset, struct mv_mac_data *mac)
+{
+ if (kset->base.speed == SPEED_10000) {
+ mac->phy_mode = PHY_INTERFACE_MODE_10GKR;
+ } else if (kset->base.speed == SPEED_2500) {
+ mac->phy_mode = PHY_INTERFACE_MODE_SGMII;
+ mac->speed = SPEED_2500;
+ mac->flags |= MV_EMAC_F_SGMII2_5;
+ } else {
+ mac->phy_mode = PHY_INTERFACE_MODE_SGMII;
+ mac->speed = SPEED_1000;
+ mac->flags &= ~MV_EMAC_F_SGMII2_5;
+ }
+}
+
+/* Set settings (phy address, speed) for ethtools.
+ * With an attached PHY the request is delegated to phylib. Otherwise,
+ * if the speed requires a different COMPHY serdes mode, the lane is
+ * re-modes (taking the port down if it is up) before the MAC-level
+ * autoneg/speed settings are validated and applied.
+ */
+static int mv_pp2x_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *kset)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ int err;
+ struct mv_port_link_status status;
+ struct gop_hw *gop = &port->priv->hw.gop;
+ struct mv_mac_data *mac = &port->mac_data;
+ int gop_port = mac->gop_index;
+ bool phy_mode_update = false;
+
+ if (port->priv->pp2_version == PPV21)
+ if (!port->mac_data.phy_dev)
+ return -ENODEV;
+
+ if (port->mac_data.phy_dev)
+ return phy_ethtool_set_link_ksettings(dev, kset);
+
+ if (port->comphy) {
+ int comphy_old_mode, comphy_new_mode;
+
+ comphy_new_mode = mv_pp2x_get_new_comphy_mode(kset, port->id);
+
+ if (comphy_new_mode < 0) {
+ pr_err("Port ID %d: unsupported speed set\n", port->id);
+ return comphy_new_mode;
+ }
+ comphy_old_mode = phy_get_mode(port->comphy);
+
+ if (comphy_old_mode != comphy_new_mode) {
+ /* try to claim the lane in the new mode; restore the
+ * old mode on failure
+ */
+ err = phy_set_mode(port->comphy, comphy_new_mode);
+ if (err < 0) {
+ phy_set_mode(port->comphy, comphy_old_mode);
+ pr_err("Port ID %d: COMPHY lane is busy\n", port->id);
+ return err;
+ }
+
+ /* quiesce a running port before retraining the lane */
+ if (mac->flags & MV_EMAC_F_PORT_UP) {
+ netif_carrier_off(port->dev);
+ mv_gop110_port_events_mask(gop, mac);
+ mv_gop110_port_disable(gop, mac);
+ phy_power_off(port->comphy);
+ }
+
+ mv_pp2x_set_new_phy_mode(kset, mac);
+ phy_mode_update = true;
+ }
+ }
+
+ if (phy_mode_update) {
+ if (mac->flags & MV_EMAC_F_INIT) {
+ mac->flags &= ~MV_EMAC_F_INIT;
+ mvcpn110_mac_hw_init(port);
+ }
+ mv_pp22_set_net_comp(port->priv);
+
+ if (mac->flags & MV_EMAC_F_PORT_UP) {
+ mv_gop110_port_events_unmask(gop, mac);
+ mv_gop110_port_enable(gop, mac);
+ phy_power_on(port->comphy);
+ }
+ }
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ err = mv_pp2x_autoneg_gmac_check_valid(mac, gop, kset, &status);
+ if (err < 0)
+ return err;
+ if (kset->base.speed != SPEED_2500)
+ mv_pp2x_ethtool_set_gmac_config(status, gop, gop_port, mac, kset);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ /* fix: the result was discarded and a stale/uninitialized
+ * 'err' was tested, so validation failures were ignored
+ */
+ err = mv_pp2x_autoneg_xlg_check_valid(mac, kset);
+ if (err < 0)
+ return err;
+ break;
+ default:
+ pr_err("Wrong port mode (%d)\n", mac->phy_mode);
+ /* return a real errno, not -1 (-EPERM) */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set interrupt coalescing for ethtools.
+ * Rejects every coalescing knob the hardware has no equivalent for,
+ * clamps the rest via mv_pp2x_ethtool_valid_coalesce(), then programs
+ * per-rxq packet/time thresholds and the shared TX-done thresholds.
+ */
+static int mv_pp2x_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ int queue;
+
+ /* Check for not supported parameters */
+ if ((c->rx_coalesce_usecs_irq) ||
+ (c->rx_max_coalesced_frames_irq) ||
+ (c->tx_coalesce_usecs_irq) ||
+ (c->tx_max_coalesced_frames_irq) ||
+ (c->stats_block_coalesce_usecs) ||
+ (c->use_adaptive_rx_coalesce) ||
+ (c->use_adaptive_tx_coalesce) ||
+ (c->pkt_rate_low) ||
+ (c->rx_coalesce_usecs_low) ||
+ (c->rx_max_coalesced_frames_low) ||
+ (c->tx_coalesce_usecs_low) ||
+ (c->tx_max_coalesced_frames_low) ||
+ (c->pkt_rate_high) ||
+ (c->rx_coalesce_usecs_high) ||
+ (c->rx_max_coalesced_frames_high) ||
+ (c->tx_coalesce_usecs_high) ||
+ (c->tx_max_coalesced_frames_high) ||
+ (c->rate_sample_interval)) {
+ netdev_err(dev, "unsupported coalescing parameter\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* clamp requested values to hardware limits (may modify *c) */
+ mv_pp2x_ethtool_valid_coalesce(c, port);
+
+ for (queue = 0; queue < port->num_rx_queues; queue++) {
+ struct mv_pp2x_rx_queue *rxq = port->rxqs[queue];
+
+ rxq->time_coal = c->rx_coalesce_usecs;
+ rxq->pkts_coal = c->rx_max_coalesced_frames;
+ mv_pp2x_rx_pkts_coal_set(port, rxq);
+ mv_pp2x_rx_time_coal_set(port, rxq);
+ }
+ port->tx_time_coal = c->tx_coalesce_usecs;
+ for (queue = 0; queue < port->num_tx_queues; queue++) {
+ struct mv_pp2x_tx_queue *txq = port->txqs[queue];
+
+ txq->pkts_coal = c->tx_max_coalesced_frames;
+ }
+ /* TX-done coalescing only applies when TX-done is IRQ driven */
+ if (port->priv->pp2xdata->interrupt_tx_done) {
+ mv_pp2x_tx_done_time_coal_set(port, port->tx_time_coal);
+ on_each_cpu(mv_pp2x_tx_done_pkts_coal_set, port, 1);
+ }
+
+ return 0;
+}
+
+/* get coalescing for ethtools.
+ * All queues share the same settings, so queue 0 is representative.
+ */
+static int mv_pp2x_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+
+ c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
+ c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+ c->tx_max_coalesced_frames = port->txqs[0]->pkts_coal;
+ c->tx_coalesce_usecs = port->tx_time_coal;
+
+ return 0;
+}
+
+/* Report driver name/version and bus info for ethtool -i */
+static void mv_pp2x_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+/* Report current and maximum RX/TX ring sizes for ethtool -g */
+static void mv_pp2x_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+
+ ring->rx_max_pending = MVPP2_MAX_RXD;
+ ring->tx_max_pending = MVPP2_MAX_TXD;
+ ring->rx_pending = port->rx_ring_size;
+ ring->tx_pending = port->tx_ring_size;
+}
+
+/* Change RX/TX ring sizes (ethtool -G).
+ * On a running interface the queues are torn down and rebuilt; if the
+ * rebuild fails, the previous ring sizes are restored (and reported
+ * back in *ring) before retrying, so the device either runs with the
+ * new sizes or the old ones.
+ */
+static int mv_pp2x_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ u16 prev_rx_ring_size = port->rx_ring_size;
+ u16 prev_tx_ring_size = port->tx_ring_size;
+ int err;
+
+ err = mv_pp2x_check_ringparam_valid(dev, ring);
+ if (err)
+ return err;
+
+ if (!netif_running(dev)) {
+ /* interface down: just record the sizes for the next open */
+ port->rx_ring_size = ring->rx_pending;
+ port->tx_ring_size = ring->tx_pending;
+ return 0;
+ }
+
+ /* The interface is running, so we have to force a
+ * reallocation of the queues
+ */
+ mv_pp2x_stop_dev(port);
+ mv_pp2x_cleanup_rxqs(port);
+ mv_pp2x_cleanup_txqs(port);
+
+ port->rx_ring_size = ring->rx_pending;
+ port->tx_ring_size = ring->tx_pending;
+
+ err = mv_pp2x_setup_rxqs(port);
+ if (err) {
+ /* Reallocate Rx queues with the original ring size */
+ port->rx_ring_size = prev_rx_ring_size;
+ ring->rx_pending = prev_rx_ring_size;
+ err = mv_pp2x_setup_rxqs(port);
+ if (err)
+ goto err_out;
+ }
+ err = mv_pp2x_setup_txqs(port);
+ if (err) {
+ /* Reallocate Tx queues with the original ring size */
+ port->tx_ring_size = prev_tx_ring_size;
+ ring->tx_pending = prev_tx_ring_size;
+ err = mv_pp2x_setup_txqs(port);
+ if (err)
+ goto err_clean_rxqs;
+ }
+
+ mv_pp2x_start_dev(port);
+
+ return 0;
+
+err_clean_rxqs:
+ mv_pp2x_cleanup_rxqs(port);
+err_out:
+ netdev_err(dev, "fail to change ring parameters");
+ return err;
+}
+
+/* Size of the RSS indirection table, or 0 when RSS indirection is not
+ * available (PPv2.1).
+ */
+static u32 mv_pp2x_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+
+ /* The return type is u32: a negative errno such as -EOPNOTSUPP
+ * would be interpreted as a huge table size by the ethtool core,
+ * so report 0 ("no indirection table") instead.
+ */
+ if (port->priv->pp2_version == PPV21)
+ return 0;
+
+ return ARRAY_SIZE(port->priv->rx_indir_table);
+}
+
+/* Report which header fields feed the RSS hash for a flow type
+ * (ETHTOOL_GRXFH). TCP always hashes the 4-tuple; UDP hashes ports
+ * only in 5-tuple mode; plain IP hashes addresses only.
+ */
+static int mv_pp2x_get_rss_hash_opts(struct mv_pp2x_port *port,
+ struct ethtool_rxnfc *nfc)
+{
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ nfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ nfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ if (port->rss_cfg.rss_mode == MVPP2_RSS_NF_UDP_5T)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ nfc->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/* ethtool -n/-u: report RX ring count and RSS hash fields.
+ * Not supported on PPv2.1 or in single-queue mode.
+ */
+static int mv_pp2x_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rules)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ /* the original "if (!port)" guard sat below several 'port'
+ * dereferences and could never fire; netdev_priv() on our own
+ * netdev is always valid, so it has been dropped
+ */
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ if (port->priv->pp2_cfg.queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = ARRAY_SIZE(port->priv->rx_indir_table);
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = mv_pp2x_get_rss_hash_opts(port, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Configure which header fields feed the RSS hash (ETHTOOL_SRXFH).
+ * TCP must hash the full 4-tuple; UDP may choose 2-tuple (addresses)
+ * or 5-tuple (addresses + ports); everything else is rejected.
+ */
+static int mv_pp2x_set_rss_hash_opt(struct mv_pp2x_port *port,
+ struct ethtool_rxnfc *nfc)
+{
+ /* only address/port hash bits are meaningful here */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* TCP hash fields are fixed: all four must be requested */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ /* ports either both in or both out of the hash */
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ mv_pp22_rss_mode_set(port, MVPP2_RSS_NF_UDP_2T);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ mv_pp22_rss_mode_set(port, MVPP2_RSS_NF_UDP_5T);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* ethtool -N entry point; only ETHTOOL_SRXFH (hash field selection)
+ * is implemented. Unsupported on PPv2.1 and in single-queue mode.
+ */
+static int mv_pp2x_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ /* Single mode doesn't support RSS features */
+ if (port->priv->pp2_cfg.queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = mv_pp2x_set_rss_hash_opt(port, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* ethtool -x: report the RSS hash function (always Toeplitz/TOP) and
+ * copy out the indirection table. No RSS key is exposed.
+ */
+static int mv_pp2x_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ size_t copy_size;
+ struct mv_pp2x_port *port = netdev_priv(dev);
+
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ /* Single mode doesn't support RSS features */
+ if (port->priv->pp2_cfg.queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return -EOPNOTSUPP;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ copy_size = ARRAY_SIZE(port->priv->rx_indir_table);
+ memcpy(indir, port->priv->rx_indir_table, copy_size * sizeof(u32));
+
+ return 0;
+}
+
+/* ethtool -X: update the RSS indirection table. Changing the hash
+ * key or hash function is not supported.
+ */
+static int mv_pp2x_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ int i, err;
+ struct mv_pp2x_port *port = netdev_priv(dev);
+
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ /* Single mode doesn't support RSS features */
+ if (port->priv->pp2_cfg.queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ return -EOPNOTSUPP;
+
+ /* We require at least one supported parameter to be changed
+ * and no change in any of the unsupported parameters
+ */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
+ /* cache the table, then push it to hardware */
+ for (i = 0; i < ARRAY_SIZE(port->priv->rx_indir_table); i++)
+ port->priv->rx_indir_table[i] = indir[i];
+
+ err = mv_pp22_rss_rxfh_indir_set(port);
+ if (err) {
+ netdev_err(dev, "fail to change rxfh indir table");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Size in bytes of the register dump produced by
+ * mv_pp2x_ethtool_get_regs(), chosen by MAC flavour.
+ * Returns a negative errno on unsupported hardware or port mode.
+ */
+static int mv_pp2x_ethtool_get_regs_len(struct net_device *dev)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct mv_mac_data *mac = &port->mac_data;
+
+ if (port->priv->pp2_version == PPV21)
+ return -EOPNOTSUPP;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return MV_PP2_REGS_GMAC_LEN * sizeof(u32);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ return MV_PP2_REGS_XLG_LEN * sizeof(u32);
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ /* return a real errno, not -1 (-EPERM) */
+ return -EINVAL;
+ }
+}
+
+/* ethtool -d: dump GMAC or XLG MAC registers into @p. The buffer is
+ * sized by mv_pp2x_ethtool_get_regs_len() and zeroed before the dump.
+ * The original "if (!port)" guard was unreachable — it followed
+ * several dereferences of 'port', and netdev_priv() on our own netdev
+ * is always valid — so it has been removed.
+ */
+static void mv_pp2x_ethtool_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+ struct mv_mac_data *mac = &port->mac_data;
+
+ if (port->priv->pp2_version == PPV21)
+ return;
+
+ regs->version = port->priv->pp2_version;
+
+ switch (mac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ memset(p, 0, MV_PP2_REGS_GMAC_LEN * sizeof(u32));
+ mv_gop110_gmac_registers_dump(port, p);
+ break;
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_10GKR:
+ memset(p, 0, MV_PP2_REGS_XLG_LEN * sizeof(u32));
+ mv_gop110_xlg_registers_dump(port, p);
+ break;
+ default:
+ pr_err("%s: Wrong port mode (%d)", __func__, mac->phy_mode);
+ return;
+ }
+}
+
+/* Self-test: link check. Returns 0 when the GOP reports link up,
+ * 1 otherwise (ethtool self-test convention: non-zero == failed).
+ */
+static u64 mv_pp2x_eth_tool_link_test(struct mv_pp2x_port *port)
+{
+ struct mv_port_link_status status;
+
+ pr_info("Link testing starting\n");
+
+ mv_gop110_port_link_status(&port->priv->hw.gop,
+ &port->mac_data, &status);
+
+ if (status.linkup)
+ return 0;
+ return 1;
+}
+
+/* Write a set of test patterns (masked by @mask and @write) to the
+ * register at @reg+@offset and read each back.
+ * Returns true on MISMATCH (test failed), false on success or when
+ * @mask is 0 (read-only register, skipped). The original register
+ * value is restored before returning in every path.
+ */
+static bool mv_pp2x_reg_pattern_test(void *reg, u32 offset, u32 mask, u32 write)
+{
+ static const u32 test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 read, old;
+ int i;
+
+ if (!mask)
+ return false;
+ /* preserve the current value so the test is non-destructive */
+ old = mv_gop_gen_read(reg, offset);
+
+ for (i = 0; i < ARRAY_SIZE(test); i++) {
+ mv_gop_gen_write(reg, offset, write & test[i]);
+ read = mv_gop_gen_read(reg, offset);
+ if (read != (write & test[i] & mask)) {
+ pr_err("pattern test reg %p(test 0x%08X write 0x%08X mask 0x%08X) failed: ",
+ reg, test[i], write, mask);
+ pr_err("got 0x%08X expected 0x%08X\n", read, (write & test[i] & mask));
+ mv_gop_gen_write(reg, offset, old);
+ return true;
+ }
+ }
+
+ mv_gop_gen_write(reg, offset, old);
+
+ return false;
+}
+
+/* Pattern-test every GMAC register of the port's GOP instance.
+ * Returns 0 when all registers pass, 1 on any failure (ethtool
+ * self-test convention: non-zero result == failed).
+ */
+static u64 mv_pp2x_eth_tool_reg_test(struct mv_pp2x_port *port)
+{
+	int ind;
+	int err = 0;	/* accumulates per-register failures (bool adds) */
+	struct mv_mac_data *mac = &port->mac_data;
+	int gop_port = mac->gop_index;
+	struct gop_hw *gop = &port->priv->hw.gop;
+	/* Base of this port's GMAC register window inside the GOP block */
+	void *reg = gop->gop_110.gmac.base + gop_port * gop->gop_110.gmac.obj_size;
+
+	pr_info("Register testing starting\n");
+
+	/* Read/write control registers use the full-width mask/pattern;
+	 * status/counter registers use the read-only variants (MASK3).
+	 */
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_CTRL0_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_CTRL1_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_CTRL2_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_AUTO_NEG_CFG_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_STATUS0_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_SERIAL_PARAM_CFG_REG, MV_PP2_TEST_MASK1,
+					MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_FIFO_CFG_0_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_FIFO_CFG_1_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_SERDES_CFG0_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_SERDES_CFG1_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_SERDES_CFG2_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_SERDES_CFG3_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_PRBS_STATUS_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_PRBS_ERR_CNTR_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_STATUS1_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_MIB_CNTRS_CTRL_REG, MV_PP2_TEST_MASK2, MV_PP2_TEST_PATTERN2);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_CTRL3_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_QSGMII_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_QSGMII_STATUS_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_QSGMII_PRBS_CNTR_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	/* Per-speed flow-control timer registers (8 instances) */
+	for (ind = 0; ind < 8; ind++)
+		err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_CCFC_PORT_SPEED_TIMER_REG(ind), MV_PP2_TEST_MASK1,
+						MV_PP2_TEST_PATTERN1);
+
+	/* Flow-control DSA tag registers (4 instances) */
+	for (ind = 0; ind < 4; ind++)
+		err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_FC_DSA_TAG_REG(ind), MV_PP2_TEST_MASK1,
+						MV_PP2_TEST_PATTERN1);
+
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_0, MV_PP2_TEST_MASK1,
+					MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LINK_LEVEL_FLOW_CTRL_WINDOW_REG_1, MV_PP2_TEST_MASK1,
+					MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_CTRL4_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PORT_SERIAL_PARAM_1_CFG_REG, MV_PP2_TEST_MASK1,
+					MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LPI_CTRL_0_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LPI_CTRL_1_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LPI_CTRL_2_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LPI_STATUS_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_LPI_CNTR_REG, MV_PP2_TEST_MASK3, MV_PP2_TEST_PATTERN3);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PULSE_1_MS_LOW_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_PULSE_1_MS_HIGH_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_INTERRUPT_MASK_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+	err += mv_pp2x_reg_pattern_test(reg, MV_GMAC_INTERRUPT_SUM_MASK_REG, MV_PP2_TEST_MASK1, MV_PP2_TEST_PATTERN1);
+
+	if (err)
+		return 1;
+	return 0;
+}
+
+/* ethtool self-test (ethtool -t) entry point.
+ * Runs the link and register tests on PPv22 hardware. PPv21 and the
+ * 10G-capable port modes are not covered. Per-test results are written
+ * to @data (non-zero == failed) and failures are also reflected in
+ * test->flags via ETH_TEST_FL_FAILED.
+ */
+static void mv_pp2x_eth_tool_diag_test(struct net_device *netdev,
+				       struct ethtool_test *test, u64 *data)
+{
+	struct mv_pp2x_port *port = netdev_priv(netdev);
+	int i;
+	struct mv_mac_data *mac = &port->mac_data;
+
+	/* Self-test only implemented for PPv22 */
+	if (port->priv->pp2_version == PPV21)
+		return;
+
+	if (!(mac->flags & MV_EMAC_F_INIT)) {
+		pr_err("%s: interface %s is not initialized\n", __func__, netdev->name);
+		for (i = 0; i < MV_PP2_TEST_LEN; i++)
+			data[i] = -ENONET;
+		test->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+
+	memset(data, 0, MV_PP2_TEST_LEN * sizeof(u64));
+
+	switch (mac->phy_mode) {
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_QSGMII:
+		break;
+	case PHY_INTERFACE_MODE_XAUI:
+	case PHY_INTERFACE_MODE_RXAUI:
+	case PHY_INTERFACE_MODE_10GKR:
+		pr_err("10G Phy mode (%d) do not support test\n", mac->phy_mode);
+		return;
+	default:
+		/* Fix: '\n' was misplaced inside the parentheses ("(%d\n)") */
+		pr_err("%s: Wrong port mode (%d)\n", __func__, mac->phy_mode);
+		return;
+	}
+
+	data[0] = mv_pp2x_eth_tool_link_test(port);
+	data[1] = mv_pp2x_eth_tool_reg_test(port);
+	for (i = 0; i < MV_PP2_TEST_LEN; i++)
+		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
+
+	/* NOTE(review): purpose of the 4 s delay is not evident from this
+	 * code — presumably allows the link to settle after testing;
+	 * confirm before removing.
+	 */
+	msleep_interruptible(4 * 1000);
+}
+
+/* ethtool operations exposed by every mvpp2x netdev */
+static const struct ethtool_ops mv_pp2x_eth_tool_ops = {
+	.get_link		= ethtool_op_get_link,
+	.set_coalesce		= mv_pp2x_ethtool_set_coalesce,
+	.get_coalesce		= mv_pp2x_ethtool_get_coalesce,
+	.nway_reset		= mv_pp2x_eth_tool_nway_reset,
+	.get_drvinfo		= mv_pp2x_ethtool_get_drvinfo,
+	.get_ethtool_stats	= mv_pp2x_eth_tool_get_ethtool_stats,
+	.get_sset_count		= mv_pp2x_eth_tool_get_sset_count,
+	.get_strings		= mv_pp2x_eth_tool_get_strings,
+	.get_ringparam		= mv_pp2x_ethtool_get_ringparam,
+	.set_ringparam		= mv_pp2x_ethtool_set_ringparam,
+	.get_pauseparam		= mv_pp2x_get_pauseparam,
+	.set_pauseparam		= mv_pp2x_set_pauseparam,
+	.get_rxfh_indir_size	= mv_pp2x_ethtool_get_rxfh_indir_size,
+	.get_rxnfc		= mv_pp2x_ethtool_get_rxnfc,
+	.set_rxnfc		= mv_pp2x_ethtool_set_rxnfc,
+	.get_rxfh		= mv_pp2x_ethtool_get_rxfh,
+	.set_rxfh		= mv_pp2x_ethtool_set_rxfh,
+	.get_regs_len		= mv_pp2x_ethtool_get_regs_len,
+	.get_regs		= mv_pp2x_ethtool_get_regs,
+	.self_test		= mv_pp2x_eth_tool_diag_test,
+	.get_link_ksettings	= mv_pp2x_ethtool_get_link_ksettings,
+	.set_link_ksettings	= mv_pp2x_ethtool_set_link_ksettings,
+};
+
+/* Attach the mvpp2x ethtool operations to @netdev */
+void mv_pp2x_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &mv_pp2x_eth_tool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c
new file mode 100644
index 000000000000..a356ac4eadba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.c
@@ -0,0 +1,6204 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/inetdevice.h>
+#include <uapi/linux/ppp_defs.h>
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mv_pp2x.h"
+#include "mv_pp2x_hw.h"
+
+/* Utility/helper methods */
+
+/* Element count of a locally-declared last_used[] register buffer */
+#define MVPP2_REG_BUF_SIZE ARRAY_SIZE(last_used)
+
+/* Guard helper: report and reject a NULL pointer.
+ * Returns MV_OK when @ptr is non-NULL, MV_ERROR otherwise.
+ */
+int mv_pp2x_ptr_validate(const void *ptr)
+{
+	if (ptr)
+		return MV_OK;
+
+	pr_err("%s: null pointer.\n", __func__);
+	return MV_ERROR;
+}
+EXPORT_SYMBOL(mv_pp2x_ptr_validate);
+
+/* Guard helper: check that @value lies within [@min, @max] (inclusive).
+ * Returns MV_OK when in range; logs and returns MV_ERROR otherwise.
+ */
+int mv_pp2x_range_validate(int value, int min, int max)
+{
+	if (value >= min && value <= max)
+		return MV_OK;
+
+	pr_err("%s: value 0x%X (%d) is out of range [0x%X , 0x%X].\n",
+	       __func__, value, value, min, max);
+	return MV_ERROR;
+}
+EXPORT_SYMBOL(mv_pp2x_range_validate);
+
+/* Parser configuration routines */
+
+/* Flow ID definition array: maps parser result-info values (and the
+ * masks selecting which RI bits are significant) to classifier flow
+ * IDs. Several consecutive lines may map to the same flow ID, e.g.
+ * IPv4 plain / with options / other all share one TCP flow.
+ */
+static struct mv_pp2x_prs_flow_id
+	mv_pp2x_prs_flow_id_array[MVPP2_PRS_FL_TCAM_NUM] = {
+	/***********#Flow ID#**************#Result Info#************/
+	/* IPv4 TCP, not fragmented, untagged */
+	{MVPP2_PRS_FL_IP4_TCP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP4 |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP4_OPT |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP4_OTHER |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 UDP, not fragmented, untagged */
+	{MVPP2_PRS_FL_IP4_UDP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP4 |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP4_OPT |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP4_OTHER |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 TCP, not fragmented, tagged (VLAN not checked) */
+	{MVPP2_PRS_FL_IP4_TCP_NF_TAG, {MVPP2_PRS_RI_L3_IP4 |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_TCP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_NF_TAG, {MVPP2_PRS_RI_L3_IP4_OPT |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_TCP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_NF_TAG, {MVPP2_PRS_RI_L3_IP4_OTHER |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_TCP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 UDP, not fragmented, tagged */
+	{MVPP2_PRS_FL_IP4_UDP_NF_TAG, {MVPP2_PRS_RI_L3_IP4 |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_UDP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_NF_TAG, {MVPP2_PRS_RI_L3_IP4_OPT |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_UDP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_NF_TAG, {MVPP2_PRS_RI_L3_IP4_OTHER |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_UDP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 TCP, not fragmented, untagged */
+	{MVPP2_PRS_FL_IP6_TCP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP6 |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_TCP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP6_EXT |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 UDP, not fragmented, untagged */
+	{MVPP2_PRS_FL_IP6_UDP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP6 |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_UDP_NF_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					 MVPP2_PRS_RI_L3_IP6_EXT |
+					 MVPP2_PRS_RI_IP_FRAG_FALSE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_VLAN_MASK |
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 TCP, not fragmented, tagged */
+	{MVPP2_PRS_FL_IP6_TCP_NF_TAG, {MVPP2_PRS_RI_L3_IP6 |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_TCP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_TCP_NF_TAG, {MVPP2_PRS_RI_L3_IP6_EXT |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_TCP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 UDP, not fragmented, tagged */
+	{MVPP2_PRS_FL_IP6_UDP_NF_TAG, {MVPP2_PRS_RI_L3_IP6 |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_UDP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_UDP_NF_TAG, {MVPP2_PRS_RI_L3_IP6_EXT |
+				       MVPP2_PRS_RI_IP_FRAG_FALSE |
+				       MVPP2_PRS_RI_L4_UDP,
+				       MVPP2_PRS_RI_L3_PROTO_MASK |
+				       MVPP2_PRS_RI_IP_FRAG_MASK |
+				       MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 TCP, fragmented, untagged */
+	{MVPP2_PRS_FL_IP4_TCP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP4 |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_TCP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP4_OPT |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_TCP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP4_OTHER |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_TCP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 UDP, fragmented, untagged */
+	{MVPP2_PRS_FL_IP4_UDP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP4 |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_UDP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP4_OPT |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_UDP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP4_OTHER |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_UDP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 TCP, fragmented, tagged */
+	{MVPP2_PRS_FL_IP4_TCP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP4 |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP4_OPT |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TCP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP4_OTHER |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4 UDP, fragmented, tagged */
+	{MVPP2_PRS_FL_IP4_UDP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP4 |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP4_OPT |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UDP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP4_OTHER |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 TCP, fragmented, untagged */
+	{MVPP2_PRS_FL_IP6_TCP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP6 |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_TCP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_TCP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP6_EXT |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_TCP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 UDP, fragmented, untagged */
+	{MVPP2_PRS_FL_IP6_UDP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP6 |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_UDP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_UDP_FRAG_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+					   MVPP2_PRS_RI_L3_IP6_EXT |
+					   MVPP2_PRS_RI_IP_FRAG_TRUE |
+					   MVPP2_PRS_RI_L4_UDP,
+					   MVPP2_PRS_RI_VLAN_MASK |
+					   MVPP2_PRS_RI_L3_PROTO_MASK |
+					   MVPP2_PRS_RI_IP_FRAG_MASK |
+					   MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 TCP, fragmented, tagged */
+	{MVPP2_PRS_FL_IP6_TCP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP6 |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_TCP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP6_EXT |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_TCP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv6 UDP, fragmented, tagged */
+	{MVPP2_PRS_FL_IP6_UDP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP6 |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_UDP_FRAG_TAG, {MVPP2_PRS_RI_L3_IP6_EXT |
+					 MVPP2_PRS_RI_IP_FRAG_TRUE |
+					 MVPP2_PRS_RI_L4_UDP,
+					 MVPP2_PRS_RI_L3_PROTO_MASK |
+					 MVPP2_PRS_RI_IP_FRAG_MASK |
+					 MVPP2_PRS_RI_L4_PROTO_MASK} },
+
+	/* IPv4, any L4, untagged / tagged */
+	{MVPP2_PRS_FL_IP4_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+				  MVPP2_PRS_RI_L3_IP4,
+				  MVPP2_PRS_RI_VLAN_MASK |
+				  MVPP2_PRS_RI_L3_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+				  MVPP2_PRS_RI_L3_IP4_OPT,
+				  MVPP2_PRS_RI_VLAN_MASK |
+				  MVPP2_PRS_RI_L3_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+				  MVPP2_PRS_RI_L3_IP4_OTHER,
+				  MVPP2_PRS_RI_VLAN_MASK |
+				  MVPP2_PRS_RI_L3_PROTO_MASK} },
+
+	{MVPP2_PRS_FL_IP4_TAG, {MVPP2_PRS_RI_L3_IP4,
+				MVPP2_PRS_RI_L3_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TAG, {MVPP2_PRS_RI_L3_IP4_OPT,
+				MVPP2_PRS_RI_L3_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP4_TAG, {MVPP2_PRS_RI_L3_IP4_OTHER,
+				MVPP2_PRS_RI_L3_PROTO_MASK} },
+
+	/* IPv6, any L4, untagged / tagged */
+	{MVPP2_PRS_FL_IP6_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+				  MVPP2_PRS_RI_L3_IP6,
+				  MVPP2_PRS_RI_VLAN_MASK |
+				  MVPP2_PRS_RI_L3_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_UNTAG, {MVPP2_PRS_RI_VLAN_NONE |
+				  MVPP2_PRS_RI_L3_IP6_EXT,
+				  MVPP2_PRS_RI_VLAN_MASK |
+				  MVPP2_PRS_RI_L3_PROTO_MASK} },
+
+	{MVPP2_PRS_FL_IP6_TAG, {MVPP2_PRS_RI_L3_IP6,
+				MVPP2_PRS_RI_L3_PROTO_MASK} },
+	{MVPP2_PRS_FL_IP6_TAG, {MVPP2_PRS_RI_L3_IP6_EXT,
+				MVPP2_PRS_RI_L3_PROTO_MASK} },
+
+	/* Non-IP traffic; the tagged entry is the catch-all (empty mask) */
+	{MVPP2_PRS_FL_NON_IP_UNTAG, {MVPP2_PRS_RI_VLAN_NONE,
+				     MVPP2_PRS_RI_VLAN_MASK} },
+
+	{MVPP2_PRS_FL_NON_IP_TAG, {0, 0} },
+};
+
+/* Per-flow-ID attribute bitmasks, indexed by MVPP2_PRS_FL_*.
+ * Presumably populated at init from mv_pp2x_prs_flow_id_array —
+ * confirm against the init path.
+ */
+static int mv_pp2x_prs_flow_id_attr_tbl[MVPP2_PRS_FL_LAST];
+
+/* Write a parser sw entry (all TCAM and SRAM words) to the hardware.
+ * Returns 0 on success, -EINVAL when pe->index is out of range.
+ */
+int mv_pp2x_prs_hw_write(struct mv_pp2x_hw *hw, struct mv_pp2x_prs_entry *pe)
+{
+	int word;
+
+	if (pe->index >= MVPP2_PRS_TCAM_SRAM_SIZE)
+		return -EINVAL;
+
+	/* Make sure the entry is not written as invalidated */
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+	/* TCAM is accessed indirectly: latch the index, then the data */
+	mv_pp2x_write(hw, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+	for (word = 0; word < MVPP2_PRS_TCAM_WORDS; word++)
+		mv_pp2x_write(hw, MVPP2_PRS_TCAM_DATA_REG(word),
+			      pe->tcam.word[word]);
+
+	/* Same indirect scheme for the SRAM half of the entry */
+	mv_pp2x_write(hw, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (word = 0; word < MVPP2_PRS_SRAM_WORDS; word++)
+		mv_pp2x_write(hw, MVPP2_PRS_SRAM_DATA_REG(word),
+			      pe->sram.word[word]);
+
+	return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_hw_write);
+
+/* Read the parser entry selected by pe->index from hw into @pe.
+ * Returns MVPP2_PRS_TCAM_ENTRY_INVALID if the hw entry is invalidated
+ * (only the invalid word is filled in that case), -EINVAL on a bad
+ * index, 0 otherwise.
+ */
+int mv_pp2x_prs_hw_read(struct mv_pp2x_hw *hw, struct mv_pp2x_prs_entry *pe)
+{
+	int i;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return -EINVAL;
+
+	/* Select the tcam entry - indirect access */
+	mv_pp2x_write(hw, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+	/* Check the invalidation bit before bothering with the full read */
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mv_pp2x_read(hw,
+		MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+		return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		pe->tcam.word[i] = mv_pp2x_read(hw, MVPP2_PRS_TCAM_DATA_REG(i));
+
+	/* Select the sram entry - indirect access */
+	mv_pp2x_write(hw, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		pe->sram.word[i] = mv_pp2x_read(hw, MVPP2_PRS_SRAM_DATA_REG(i));
+
+	return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_hw_read);
+
+/* Zero out a parser sw entry */
+void mv_pp2x_prs_sw_clear(struct mv_pp2x_prs_entry *pe)
+{
+	memset(pe, 0, sizeof(*pe));
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sw_clear);
+
+/* Invalidate the tcam hw entry at @index: only the invalidation bit is
+ * written; the rest of the entry is left untouched in hardware.
+ */
+void mv_pp2x_prs_hw_inv(struct mv_pp2x_hw *hw, int index)
+{
+	/* Select index - indirect access */
+	mv_pp2x_write(hw, MVPP2_PRS_TCAM_IDX_REG, index);
+	mv_pp2x_write(hw, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+		      MVPP2_PRS_TCAM_INV_MASK);
+}
+EXPORT_SYMBOL(mv_pp2x_prs_hw_inv);
+
+/* Mark shadow-table entry @index as in use and record its lookup ID */
+static void mv_pp2x_prs_shadow_set(struct mv_pp2x_hw *hw, int index, int lu)
+{
+	hw->prs_shadow[index].valid = true;
+	hw->prs_shadow[index].lu = lu;
+}
+
+/* Record result-info value and mask for shadow-table entry @index */
+static void mv_pp2x_prs_shadow_ri_set(struct mv_pp2x_hw *hw, int index,
+				      unsigned int ri,
+				      unsigned int ri_mask)
+{
+	hw->prs_shadow[index].ri_mask = ri_mask;
+	hw->prs_shadow[index].ri = ri;
+}
+
+/* Set the lookup-ID byte of a tcam sw entry and enable it for matching */
+void mv_pp2x_prs_tcam_lu_set(struct mv_pp2x_prs_entry *pe, unsigned int lu)
+{
+	pe->tcam.byte[HW_BYTE_OFFS(MVPP2_PRS_TCAM_LU_BYTE)] = lu;
+	pe->tcam.byte[HW_BYTE_OFFS(
+		MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE))] =
+			MVPP2_PRS_LU_MASK;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_tcam_lu_set);
+
+/* Add (@add == true) or remove a single port from the port-match mask
+ * of a tcam sw entry. A cleared enable bit means the port matches.
+ */
+void mv_pp2x_prs_tcam_port_set(struct mv_pp2x_prs_entry *pe,
+			       unsigned int port, bool add)
+{
+	int enable_off =
+		HW_BYTE_OFFS(MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE));
+
+	if (add)
+		pe->tcam.byte[enable_off] &= ~BIT(port);
+	else
+		pe->tcam.byte[enable_off] |= BIT(port);
+}
+EXPORT_SYMBOL(mv_pp2x_prs_tcam_port_set);
+
+/* Replace the whole port map of a tcam sw entry with @ports.
+ * The hardware stores the map inverted in the enable byte (0 = match),
+ * hence the ~ports below; the data byte itself is always zero.
+ */
+void mv_pp2x_prs_tcam_port_map_set(struct mv_pp2x_prs_entry *pe,
+				   unsigned int ports)
+{
+	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
+	int enable_off =
+		HW_BYTE_OFFS(MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE));
+
+	pe->tcam.byte[HW_BYTE_OFFS(MVPP2_PRS_TCAM_PORT_BYTE)] = 0;
+	pe->tcam.byte[enable_off] &= ~port_mask;
+	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_tcam_port_map_set);
+
+/* Return the (re-inverted) port map of a tcam sw entry */
+static unsigned int mv_pp2x_prs_tcam_port_map_get(struct mv_pp2x_prs_entry *pe)
+{
+	int enable_off =
+		HW_BYTE_OFFS(MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE));
+
+	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
+
+/* Store one data byte and its per-bit enable mask in a tcam sw entry */
+void mv_pp2x_prs_tcam_data_byte_set(struct mv_pp2x_prs_entry *pe,
+				    unsigned int offs,
+				    unsigned char byte,
+				    unsigned char enable)
+{
+	pe->tcam.byte[TCAM_DATA_BYTE(offs)] = byte;
+	pe->tcam.byte[TCAM_DATA_MASK(offs)] = enable;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_tcam_data_byte_set);
+
+/* Fetch one data byte and its enable mask from a tcam sw entry */
+static void mv_pp2x_prs_tcam_data_byte_get(struct mv_pp2x_prs_entry *pe,
+					   unsigned int offs,
+					   unsigned char *byte,
+					   unsigned char *enable)
+{
+	*byte = pe->tcam.byte[TCAM_DATA_BYTE(offs)];
+	*enable = pe->tcam.byte[TCAM_DATA_MASK(offs)];
+}
+
+/* Store a 32-bit data word (and enable mask) in a tcam sw entry as four
+ * consecutive bytes. HW_BYTE_OFFS maps CPU byte order to the hw layout.
+ * @offs is in dword units.
+ */
+static void mv_pp2x_prs_tcam_data_dword_set(struct mv_pp2x_prs_entry *pe,
+					    unsigned int offs,
+					    unsigned int word,
+					    unsigned int enable)
+{
+	int index, offset;
+	unsigned char byte, byte_mask;
+
+	for (index = 0; index < 4; index++) {
+		offset = (offs * 4) + index;
+		byte = ((unsigned char *)&word)[HW_BYTE_OFFS(index)];
+		byte_mask = ((unsigned char *)&enable)[HW_BYTE_OFFS(index)];
+		mv_pp2x_prs_tcam_data_byte_set(pe, offset, byte, byte_mask);
+	}
+}
+
+/* Read back a 32-bit data word and its enable mask from a tcam sw
+ * entry; the byte-order mapping mirrors mv_pp2x_prs_tcam_data_dword_set.
+ */
+static void mv_pp2x_prs_tcam_data_dword_get(struct mv_pp2x_prs_entry *pe,
+					    unsigned int offs,
+					    unsigned int *word,
+					    unsigned int *enable)
+{
+	int index, offset;
+	unsigned char byte, mask;
+
+	for (index = 0; index < 4; index++) {
+		offset = (offs * 4) + index;
+		mv_pp2x_prs_tcam_data_byte_get(pe, offset, &byte, &mask);
+		((unsigned char *)word)[HW_BYTE_OFFS(index)] = byte;
+		((unsigned char *)enable)[HW_BYTE_OFFS(index)] = mask;
+	}
+}
+
+/* Check whether the two tcam data bytes at @offs (little-endian pair)
+ * equal the 16-bit value @data.
+ */
+static bool mv_pp2x_prs_tcam_data_cmp(struct mv_pp2x_prs_entry *pe, int offs,
+				      u16 data)
+{
+	u16 tcam_data;
+
+	tcam_data = (pe->tcam.byte[TCAM_DATA_BYTE(offs + 1)] << 8) |
+		    pe->tcam.byte[TCAM_DATA_BYTE(offs)];
+
+	return tcam_data == data;
+}
+
+/* Update additional-info (AI) bits in a tcam sw entry. Only the bits
+ * selected by @enable are modified; their enable bits are also set so
+ * they participate in the tcam match.
+ */
+void mv_pp2x_prs_tcam_ai_update(struct mv_pp2x_prs_entry *pe,
+				unsigned int bits,
+				unsigned int enable)
+{
+	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+		if (!(enable & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			pe->tcam.byte[HW_BYTE_OFFS(ai_idx)] |= 1 << i;
+		else
+			pe->tcam.byte[HW_BYTE_OFFS(ai_idx)] &= ~(1 << i);
+	}
+
+	pe->tcam.byte[HW_BYTE_OFFS(MVPP2_PRS_TCAM_EN_OFFS(ai_idx))] |= enable;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_tcam_ai_update);
+
+/* Return the additional-info (AI) byte of a tcam sw entry */
+static int mv_pp2x_prs_tcam_ai_get(struct mv_pp2x_prs_entry *pe)
+{
+	return pe->tcam.byte[HW_BYTE_OFFS(MVPP2_PRS_TCAM_AI_BYTE)];
+}
+
+/* Program an ethertype match (big-endian, both bytes fully enabled)
+ * at tcam data offset @offset.
+ */
+static void mv_pp2x_prs_match_etype(struct mv_pp2x_prs_entry *pe, int offset,
+				    unsigned short ethertype)
+{
+	mv_pp2x_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+	mv_pp2x_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* OR @val into the sram sw entry starting at bit @bit_num. Note: the
+ * shifted value must fit within the addressed byte — no carry into the
+ * next byte is performed.
+ */
+static void mv_pp2x_prs_sram_bits_set(struct mv_pp2x_prs_entry *pe, int bit_num,
+				      int val)
+{
+	pe->sram.byte[SRAM_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear the bits of @val (shifted to @bit_num) in the sram sw entry;
+ * same single-byte limitation as mv_pp2x_prs_sram_bits_set.
+ */
+static void mv_pp2x_prs_sram_bits_clear(struct mv_pp2x_prs_entry *pe,
+					int bit_num, int val)
+{
+	pe->sram.byte[SRAM_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
+
+/* Set/clear result-info (RI) bits in a sram sw entry. Only bits present
+ * in @mask are touched, and the matching RI control bit is raised for
+ * each so the parser applies the update.
+ */
+void mv_pp2x_prs_sram_ri_update(struct mv_pp2x_prs_entry *pe,
+				unsigned int bits, unsigned int mask)
+{
+	const int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+	unsigned int i;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mv_pp2x_prs_sram_bits_set(pe, ri_off + i, 1);
+		else
+			mv_pp2x_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+		mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i,
+					  1);
+	}
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sram_ri_update);
+
+/* Return the result-info word of a sram sw entry */
+static int mv_pp2x_prs_sram_ri_get(struct mv_pp2x_prs_entry *pe)
+{
+	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Set/clear additional-info (AI) bits in a sram sw entry. Mirrors
+ * mv_pp2x_prs_sram_ri_update: only bits in @mask are touched and the
+ * matching AI control bit is raised for each.
+ */
+void mv_pp2x_prs_sram_ai_update(struct mv_pp2x_prs_entry *pe,
+				unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mv_pp2x_prs_sram_bits_set(pe, ai_off + i, 1);
+		else
+			mv_pp2x_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+		mv_pp2x_prs_sram_bits_set(pe,
+					  MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+	}
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sram_ai_update);
+
+/* Read the AI bits from a sram sw entry. The AI field is not
+ * byte-aligned, so the value is reassembled from two adjacent bytes.
+ */
+static int mv_pp2x_prs_sram_ai_get(struct mv_pp2x_prs_entry *pe)
+{
+	u8 bits;
+	int ai_off = SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+	int ai_en_off = ai_off + 1;	/* remaining AI bits spill here */
+	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+	bits = (pe->sram.byte[ai_off] >> ai_shift) |
+	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+	return bits;
+}
+
+/* In sram sw entry set lookup ID field of the tcam key to be used in
+ * the next lookup iteration.
+ */
+void mv_pp2x_prs_sram_next_lu_set(struct mv_pp2x_prs_entry *pe,
+				  unsigned int lu)
+{
+	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+	/* Clear the whole field before setting the new lookup ID */
+	mv_pp2x_prs_sram_bits_clear(pe, sram_next_off,
+				    MVPP2_PRS_SRAM_NEXT_LU_MASK);
+	mv_pp2x_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sram_next_lu_set);
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier. The shift is
+ * stored sign-magnitude: a sign bit plus the absolute value.
+ */
+static void mv_pp2x_prs_sram_shift_set(struct mv_pp2x_prs_entry *pe, int shift,
+				       unsigned int op)
+{
+	/* Set sign */
+	if (shift < 0) {
+		mv_pp2x_prs_sram_bits_set(pe,
+					  MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+		shift = 0 - shift;
+	} else {
+		mv_pp2x_prs_sram_bits_clear(pe,
+					    MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+	}
+
+	/* Set magnitude (field is byte-aligned, so direct store) */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+		(unsigned char)shift;
+
+	/* Reset and set operation */
+	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+				    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+	mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+	/* Set base offset as current */
+	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier. Both the UDF offset and the op-select
+ * fields straddle a byte boundary, so each is written in two parts:
+ * the low bits via the bit helpers, the overflow bits by hand into the
+ * following byte.
+ */
+static void mv_pp2x_prs_sram_offset_set(struct mv_pp2x_prs_entry *pe,
+					unsigned int type, int offset,
+					unsigned int op)
+{
+	/* Set sign (sign-magnitude encoding) */
+	if (offset < 0) {
+		mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+		offset = 0 - offset;
+	} else {
+		mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+	}
+
+	/* Set value: low part via bit helpers ... */
+	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+				    MVPP2_PRS_SRAM_UDF_MASK);
+	mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+	/* ... then clear and write the bits that spill into the next byte */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+				       MVPP2_PRS_SRAM_UDF_BITS)] &=
+		~(MVPP2_PRS_SRAM_UDF_MASK >>
+		  (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+	pe->sram.byte[SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+				       MVPP2_PRS_SRAM_UDF_BITS)] |=
+		(offset >> (8 -
+			    (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+	/* Set offset type */
+	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+				    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+	mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+	/* Set offset operation, again split across the byte boundary */
+	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+				    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+	mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+				       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+		~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+		  (8 -
+		   (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+				       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+		(op >> (8 -
+			(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	/* Set base offset as current */
+	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find the parser flow entry matching @flow with the given result-info
+ * value and mask. Returns a heap-allocated sw entry filled from hw that
+ * THE CALLER MUST FREE, or NULL when no entry matches (or on allocation
+ * failure).
+ */
+static struct mv_pp2x_prs_entry *mv_pp2x_prs_flow_find(struct mv_pp2x_hw *hw,
+						       int flow,
+						       unsigned int ri,
+						       unsigned int ri_mask)
+{
+	struct mv_pp2x_prs_entry *pe;
+	int tid;
+	unsigned int dword, enable;
+
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	/* NOTE(review): this lu_set is overwritten by the hw_read in the
+	 * loop below; kept for parity with the original/upstream code.
+	 */
+	mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Go through the all entires with MVPP2_PRS_LU_FLOWS */
+	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+		u8 bits;
+
+		if (!hw->prs_shadow[tid].valid ||
+		    hw->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+			continue;
+
+		pe->index = tid;
+		mv_pp2x_prs_hw_read(hw, pe);
+
+		/* Check result info, because there maybe several
+		 * TCAM lines to generate the same flow
+		 */
+		mv_pp2x_prs_tcam_data_dword_get(pe, 0, &dword, &enable);
+		if ((dword != ri) || (enable != ri_mask))
+			continue;
+
+		bits = mv_pp2x_prs_sram_ai_get(pe);
+
+		/* Sram store classification lookup ID in AI bits [5:0] */
+		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Return the first free tcam index in [start, end] (bounds are swapped
+ * if given in reverse and clamped to the tcam size). Returns -EINVAL
+ * when the range is exhausted.
+ */
+static int mv_pp2x_prs_tcam_first_free(struct mv_pp2x_hw *hw,
+				       unsigned char start,
+				       unsigned char end)
+{
+	int tid;
+
+	if (start > end)
+		swap(start, end);
+
+	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+	for (tid = start; tid <= end; tid++) {
+		if (!hw->prs_shadow[tid].valid)
+			return tid;
+	}
+	pr_err("Out of TCAM Entries !!: %s(%d)\n", __FILENAME__, __LINE__);
+	return -EINVAL;
+}
+
+/* Enable/disable dropping all mac da's */
+static void mv_pp2x_prs_mac_drop_all_set(struct mv_pp2x_hw *hw,
+ int port, bool add)
+{
+ struct mv_pp2x_prs_entry pe;
+
+ if (hw->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+ /* Entry exist - update port only */
+ pe.index = MVPP2_PE_DROP_ALL;
+ mv_pp2x_prs_hw_read(hw, &pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_DROP_ALL;
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Mask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mv_pp2x_prs_tcam_port_set(&pe, port, add);
+
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
+/* Set port to promiscuous mode */
+void mv_pp2x_prs_mac_promisc_set(struct mv_pp2x_hw *hw, int port, bool add)
+{
+ struct mv_pp2x_prs_entry pe;
+
+ /* Promiscuous mode - Accept unknown packets */
+
+ if (hw->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+ /* Entry exist - update port only */
+ pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+ mv_pp2x_prs_hw_read(hw, &pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+
+ /* Continue - set next lookup */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+ MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Shift to ethertype */
+ mv_pp2x_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port mask */
+ mv_pp2x_prs_tcam_port_set(&pe, port, add);
+
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
+/* Accept multicast */
+void mv_pp2x_prs_mac_multi_set(struct mv_pp2x_hw *hw, int port, int index,
+ bool add)
+{
+ struct mv_pp2x_prs_entry pe;
+ unsigned char da_mc;
+
+ /* Ethernet multicast address first byte is
+ * 0x01 for IPv4 and 0x33 for IPv6
+ */
+ da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+ if (hw->prs_shadow[index].valid) {
+ /* Entry exist - update port only */
+ pe.index = index;
+ mv_pp2x_prs_hw_read(hw, &pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = index;
+
+ /* Continue - set next lookup */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+ MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Update tcam entry data first byte */
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
+
+ /* Shift to ethertype */
+ mv_pp2x_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port mask */
+ mv_pp2x_prs_tcam_port_set(&pe, port, add);
+
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
+/* Set entry for dsa packets */
+static void mv_pp2x_prs_dsa_tag_set(struct mv_pp2x_hw *hw, int port, bool add,
+ bool tagged, bool extend)
+{
+ struct mv_pp2x_prs_entry pe;
+ int tid, shift;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ shift = 4;
+ }
+
+ if (hw->prs_shadow[tid].valid) {
+ /* Entry exist - update port only */
+ pe.index = tid;
+ mv_pp2x_prs_hw_read(hw, &pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
+ mv_pp2x_prs_sram_shift_set(&pe, shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 0,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ /* Clear all ai bits for next iteration */
+ mv_pp2x_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+ /* If packet is tagged continue check vlans */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+
+ /* Mask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mv_pp2x_prs_tcam_port_set(&pe, port, add);
+
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mv_pp2x_prs_dsa_tag_ethertype_set(struct mv_pp2x_hw *hw, int port,
+ bool add, bool tagged,
+ bool extend)
+{
+ struct mv_pp2x_prs_entry pe;
+ int tid, shift, port_mask;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+ MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+ port_mask = 0;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+ MVPP2_PE_ETYPE_DSA_UNTAGGED;
+ port_mask = MVPP2_PRS_PORT_MASK;
+ shift = 4;
+ }
+
+ if (hw->prs_shadow[tid].valid) {
+ /* Entry exist - update port only */
+ pe.index = tid;
+ mv_pp2x_prs_hw_read(hw, &pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Set ethertype */
+ mv_pp2x_prs_match_etype(&pe, 0, ETH_P_EDSA);
+ mv_pp2x_prs_match_etype(&pe, 2, 0);
+
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+ MVPP2_PRS_RI_DSA_MASK);
+ /* Shift ethertype + 2 byte reserved + tag*/
+ mv_pp2x_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mv_pp2x_prs_tcam_data_byte_set(&pe,
+ MVPP2_ETH_TYPE_LEN + 2 + 3,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ /* Clear all ai bits for next iteration */
+ mv_pp2x_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+ /* If packet is tagged continue check vlans */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+ /* Mask/unmask all ports, depending on dsa type */
+ mv_pp2x_prs_tcam_port_map_set(&pe, port_mask);
+ }
+
+ /* Update port mask */
+ mv_pp2x_prs_tcam_port_set(&pe, port, add);
+
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static struct mv_pp2x_prs_entry *mv_pp2x_prs_vlan_find(struct mv_pp2x_hw *hw,
+ unsigned short tpid,
+ int ai)
+{
+ struct mv_pp2x_prs_entry *pe;
+ int tid;
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return NULL;
+ mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+ /* Go through the all entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_bits, ai_bits;
+ bool match;
+
+ if (!hw->prs_shadow[tid].valid ||
+ hw->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ pe->index = tid;
+
+ mv_pp2x_prs_hw_read(hw, pe);
+ match = mv_pp2x_prs_tcam_data_cmp(pe, 0, swab16(tpid));
+ if (!match)
+ continue;
+
+ /* Get vlan type */
+ ri_bits = mv_pp2x_prs_sram_ri_get(pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+ /* Get current ai value from tcam */
+ ai_bits = mv_pp2x_prs_tcam_ai_get(pe);
+ /* Clear double vlan bit */
+ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+ if (ai != ai_bits)
+ continue;
+
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ return pe;
+ }
+ kfree(pe);
+
+ return NULL;
+}
+
/* Add/update single/triple vlan entry.
 *
 * Looks up an existing entry for @tpid/@ai; when none exists a new
 * TCAM slot is allocated from the top of the free range, but only if
 * it lies above every double-vlan entry (double vlan entries must sit
 * at lower indices so they are matched first).  In both cases the
 * entry's port map is replaced with @port_map and the entry written
 * to hardware.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, a negative
 * value from mv_pp2x_prs_tcam_first_free() when the TCAM is full, or
 * -EINVAL when the free slot would fall inside the double-vlan region.
 */
static int mv_pp2x_prs_vlan_add(struct mv_pp2x_hw *hw, unsigned short tpid,
				int ai, unsigned int port_map)
{
	struct mv_pp2x_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mv_pp2x_prs_vlan_find(hw, tpid, ai);

	if (!pe) {
		/* Create new tcam entry; bounds reversed on purpose -
		 * mv_pp2x_prs_tcam_first_free() swaps them and this
		 * documents that we want the range scanned as a whole.
		 */
		tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_LAST_FREE_TID,
						  MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!hw->prs_shadow[tid_aux].valid ||
			    hw->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mv_pp2x_prs_hw_read(hw, pe);
			ri_bits = mv_pp2x_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* The new single/triple entry must live above (higher tid
		 * than) every double vlan entry.
		 */
		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(struct mv_pp2x_prs_entry));
		mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mv_pp2x_prs_match_etype(pe, 0, tpid);

		mv_pp2x_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift 4 bytes - skip 1 vlan tag */
		mv_pp2x_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Clear all ai bits for next iteration */
		mv_pp2x_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mv_pp2x_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						   MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Mark the tcam ai with the double-vlan bit so the
			 * triple-vlan entry only hits after a double match.
			 */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mv_pp2x_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						   MVPP2_PRS_RI_VLAN_MASK);
		}
		mv_pp2x_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mv_pp2x_prs_shadow_set(hw, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mv_pp2x_prs_tcam_port_map_set(pe, port_map);

	mv_pp2x_prs_hw_write(hw, pe);

error:
	/* The success path also lands here: pe is a local copy in both
	 * the found and newly-created cases and must always be freed.
	 */
	kfree(pe);

	return ret;
}
+
+/* Get first free double vlan ai number */
+static int mv_pp2x_prs_double_vlan_ai_free_get(struct mv_pp2x_hw *hw)
+{
+ int i;
+
+ for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+ if (!hw->prs_double_vlans[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/* Search for existing double vlan entry */
+static struct mv_pp2x_prs_entry *mv_pp2x_prs_double_vlan_find(
+ struct mv_pp2x_hw *hw, unsigned short tpid1, unsigned short tpid2)
+{
+ struct mv_pp2x_prs_entry *pe;
+ int tid;
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return NULL;
+ mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+ /* Go through the all entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_mask;
+ bool match;
+
+ if (!hw->prs_shadow[tid].valid ||
+ hw->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ pe->index = tid;
+ mv_pp2x_prs_hw_read(hw, pe);
+
+ match = mv_pp2x_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
+ mv_pp2x_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
+
+ if (!match)
+ continue;
+
+ ri_mask = mv_pp2x_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+ return pe;
+ }
+ kfree(pe);
+
+ return NULL;
+}
+
/* Add or update double vlan entry.
 *
 * Looks up an existing entry for @tpid1/@tpid2; when none exists a
 * new TCAM slot is taken from the bottom of the free range, but only
 * if it lies below every single/triple vlan entry (double vlan
 * entries must be matched first).  A free "ai" number is claimed to
 * distinguish this tpid pair in the subsequent VLAN lookup pass.  In
 * both cases the entry's port map is replaced with @port_map.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when
 * no ai number is free, -ERANGE when the free slot would land above a
 * single/triple entry, or a negative value when the TCAM is full.
 */
static int mv_pp2x_prs_double_vlan_add(struct mv_pp2x_hw *hw,
				       unsigned short tpid1,
				       unsigned short tpid2,
				       unsigned int port_map)
{
	struct mv_pp2x_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mv_pp2x_prs_double_vlan_find(hw, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
						  MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mv_pp2x_prs_double_vlan_ai_free_get(hw);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!hw->prs_shadow[tid_aux].valid ||
			    hw->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mv_pp2x_prs_hw_read(hw, pe);
			ri_bits = mv_pp2x_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* The new double entry must live below (lower tid than)
		 * every single/triple vlan entry.
		 */
		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mv_pp2x_prs_entry));
		mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		/* Claim the ai number only once the entry is certain to
		 * be created.
		 */
		hw->prs_double_vlans[ai] = true;

		mv_pp2x_prs_match_etype(pe, 0, tpid1);
		mv_pp2x_prs_match_etype(pe, 4, tpid2);

		mv_pp2x_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 8 bytes - skip 2 vlan tags */
		mv_pp2x_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
					   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mv_pp2x_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					   MVPP2_PRS_RI_VLAN_MASK);
		/* Hand the claimed ai (plus the double-vlan marker) to the
		 * next lookup iteration via the sram ai bits.
		 */
		mv_pp2x_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					   MVPP2_PRS_SRAM_AI_MASK);

		mv_pp2x_prs_shadow_set(hw, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mv_pp2x_prs_tcam_port_map_set(pe, port_map);
	mv_pp2x_prs_hw_write(hw, pe);

error:
	/* Success also lands here: pe is always a local copy */
	kfree(pe);
	return ret;
}
+
/* IPv4 header parsing for fragmentation and L4 offset.
 *
 * Installs two TCAM entries for @proto (TCP, UDP or IGMP only): the
 * first matches non-fragmented packets (fragment offset and flag
 * bytes all zero) and reports @ri with FRAG_FALSE; the second, built
 * by editing the same local entry, relaxes those byte masks so any
 * remaining (fragmented) packet reports @ri with FRAG_TRUE.  Entry
 * order in the TCAM makes the stricter match win.
 *
 * Returns 0 on success, -EINVAL for an unsupported protocol, or a
 * negative value when no free TCAM slot exists.
 */
static int mv_pp2x_prs_ip4_proto(struct mv_pp2x_hw *hw, unsigned short proto,
				 unsigned int ri, unsigned int ri_mask)
{
	struct mv_pp2x_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mv_pp2x_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				    sizeof(struct iphdr) - 4,
				    MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mv_pp2x_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				   MVPP2_PRS_IPV4_DIP_AI_BIT);
	mv_pp2x_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_FALSE,
				   ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Match zero fragment offset/flags bytes, i.e. not fragmented */
	mv_pp2x_prs_tcam_data_byte_set(&pe, 2, 0x00,
				       MVPP2_PRS_TCAM_PROTO_MASK_L);
	mv_pp2x_prs_tcam_data_byte_set(&pe, 3, 0x00,
				       MVPP2_PRS_TCAM_PROTO_MASK);
	mv_pp2x_prs_tcam_data_byte_set(&pe, 5, proto,
				       MVPP2_PRS_TCAM_PROTO_MASK);
	mv_pp2x_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Fragmented packet: reuse the entry just built, editing only
	 * what differs from the non-fragmented case.
	 */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	/* NOTE(review): the first ri_update looks redundant - the second
	 * call sets a superset of the same bits; kept as-is.
	 */
	mv_pp2x_prs_sram_ri_update(&pe, ri, ri_mask);
	mv_pp2x_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				   ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Zero the masks so any fragment offset/flags value matches */
	mv_pp2x_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mv_pp2x_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
	mv_pp2x_prs_hw_write(hw, &pe);

	return 0;
}
+
+/* IPv4 L3 multicast or broadcast */
+static int mv_pp2x_prs_ip4_cast(struct mv_pp2x_hw *hw, unsigned short l3_cast)
+{
+ struct mv_pp2x_prs_entry pe;
+ int mask, tid;
+
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = tid;
+
+ switch (l3_cast) {
+ case MVPP2_PRS_L3_MULTI_CAST:
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+ MVPP2_PRS_IPV4_MC_MASK);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ case MVPP2_PRS_L3_BROAD_CAST:
+ mask = MVPP2_PRS_IPV4_BC_MASK;
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+static int mv_pp2x_prs_ip6_proto(struct mv_pp2x_hw *hw, unsigned short proto,
+ unsigned int ri, unsigned int ri_mask)
+{
+ struct mv_pp2x_prs_entry pe;
+ int tid;
+
+ if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+ (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
+ return -EINVAL;
+
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mv_pp2x_prs_sram_ri_update(&pe, ri, ri_mask);
+ mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ipv6hdr) - 6,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 0, proto,
+ MVPP2_PRS_TCAM_PROTO_MASK);
+ mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Write HW */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP6);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ return 0;
+}
+
/* IPv6 L3 multicast entry.
 *
 * Only MVPP2_PRS_L3_MULTI_CAST is supported (IPv6 has no broadcast).
 * The entry flags the packet as L3 multicast and loops back into the
 * IPv6 lookup, shifting back so the next pass starts from the IPv6
 * next-header field again.
 *
 * Returns 0 on success, -EINVAL for any other @l3_cast, or a negative
 * value when no free TCAM slot exists.
 */
static int mv_pp2x_prs_ip6_cast(struct mv_pp2x_hw *hw, unsigned short l3_cast)
{
	struct mv_pp2x_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				   MVPP2_PRS_RI_L3_ADDR_MASK);
	/* Keep the "no extension header seen" flag for the next pass */
	mv_pp2x_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				   MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mv_pp2x_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Match the IPv6 multicast DIP prefix (first address byte) */
	mv_pp2x_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				       MVPP2_PRS_IPV6_MC_MASK);
	mv_pp2x_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP6);
	mv_pp2x_prs_hw_write(hw, &pe);

	return 0;
}
+
+/* Parser per-port initialization */
+void mv_pp2x_prs_hw_port_init(struct mv_pp2x_hw *hw, int port, int lu_first,
+ int lu_max, int offset)
+{
+ u32 val;
+
+ /* Set lookup ID */
+ val = mv_pp2x_read(hw, MVPP2_PRS_INIT_LOOKUP_REG);
+ val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+ val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+ mv_pp2x_write(hw, MVPP2_PRS_INIT_LOOKUP_REG, val);
+
+ /* Set maximum number of loops for packet received from port */
+ val = mv_pp2x_read(hw, MVPP2_PRS_MAX_LOOP_REG(port));
+ val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+ val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+ mv_pp2x_write(hw, MVPP2_PRS_MAX_LOOP_REG(port), val);
+
+ /* Set initial offset for packet header extraction for the first
+ * searching loop
+ */
+ val = mv_pp2x_read(hw, MVPP2_PRS_INIT_OFFS_REG(port));
+ val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+ val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+ mv_pp2x_write(hw, MVPP2_PRS_INIT_OFFS_REG(port), val);
+}
+EXPORT_SYMBOL(mv_pp2x_prs_hw_port_init);
+
+/* Default flow entries initialization for all ports */
+static void mv_pp2x_prs_def_flow_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+ int port;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+ /* Mask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, 0);
+
+ /* Set flow ID*/
+ mv_pp2x_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_hw_write(hw, &pe);
+ }
+}
+
+/* Set default entry for Marvell Header field */
+static void mv_pp2x_prs_mh_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+
+ pe.index = MVPP2_PE_MH_DEFAULT;
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mv_pp2x_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_MH);
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
+/* Set default entires (place holder) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mv_pp2x_prs_mac_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_MAC);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* place holders only - no ports */
+ mv_pp2x_prs_mac_drop_all_set(hw, 0, false);
+ mv_pp2x_prs_mac_promisc_set(hw, 0, false);
+
+ mv_pp2x_prs_mac_multi_set(hw, 0, MVPP2_PE_MAC_MC_ALL, false);
+ mv_pp2x_prs_mac_multi_set(hw, 0, MVPP2_PE_MAC_MC_IP6, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mv_pp2x_prs_dsa_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+
+ /* None tagged EDSA entry - place holder */
+ mv_pp2x_prs_dsa_tag_set(hw, 0, false, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA entry - place holder */
+ mv_pp2x_prs_dsa_tag_set(hw, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* None tagged DSA entry - place holder */
+ mv_pp2x_prs_dsa_tag_set(hw, 0, false, MVPP2_PRS_UNTAGGED,
+ MVPP2_PRS_DSA);
+
+ /* Tagged DSA entry - place holder */
+ mv_pp2x_prs_dsa_tag_set(hw, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* None tagged EDSA ethertype entry - place holder*/
+ mv_pp2x_prs_dsa_tag_ethertype_set(hw, 0, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA ethertype entry - place holder*/
+ mv_pp2x_prs_dsa_tag_ethertype_set(hw, 0, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* None tagged DSA ethertype entry */
+ mv_pp2x_prs_dsa_tag_ethertype_set(hw, 0, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+ /* Tagged DSA ethertype entry */
+ mv_pp2x_prs_dsa_tag_ethertype_set(hw, 0, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Set default entry, in case DSA or EDSA tag not found */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = MVPP2_PE_DSA_DEFAULT;
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+ /* Shift 0 bytes */
+ mv_pp2x_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Clear all sram ai bits for next iteration */
+ mv_pp2x_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ mv_pp2x_prs_hw_write(hw, &pe);
+}
+
/* Match basic ethertypes.
 *
 * Installs the L2-stage entries for PPPoE, ARP, LBTD, IPv4 (with and
 * without options), IPv6, and a catch-all for unknown ethertypes.
 * Entries are allocated sequentially from the free-TID range; the
 * "IPv4 with options" entry deliberately reuses the previous entry's
 * state and only edits the bytes that differ, so the section order
 * below must not be changed.
 *
 * Returns 0 on success or a negative value when a free TCAM slot
 * cannot be found.
 */
static int mv_pp2x_prs_etype_init(struct mv_pp2x_hw *hw)
{
	struct mv_pp2x_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mv_pp2x_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue in the PPPoE lookup stage */
	mv_pp2x_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				   MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = false;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				  MVPP2_PRS_RI_PPPOE_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Ethertype: ARP */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mv_pp2x_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				   MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				    MVPP2_ETH_TYPE_LEN,
				    MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = true;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_L3_ARP,
				  MVPP2_PRS_RI_L3_PROTO_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Ethertype: LBTD (loopback detection frames) */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mv_pp2x_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark as special-to-CPU traffic via the CPU code and UDF3 bits */
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				   MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				   MVPP2_PRS_RI_CPU_CODE_MASK |
				   MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				    MVPP2_ETH_TYPE_LEN,
				    MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = true;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Ethertype: IPv4 without options */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mv_pp2x_prs_match_etype(&pe, 0, ETH_P_IP);
	/* Additionally require version 4 / IHL 5 in the first IP byte */
	mv_pp2x_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				       MVPP2_PRS_IPV4_HEAD |
				       MVPP2_PRS_IPV4_IHL,
				       MVPP2_PRS_IPV4_HEAD_MASK |
				       MVPP2_PRS_IPV4_IHL_MASK);

	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				   MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mv_pp2x_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				    MVPP2_ETH_TYPE_LEN,
				    MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = false;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_L3_IP4,
				  MVPP2_PRS_RI_L3_PROTO_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Ethertype: IPv4 with options - built by editing the previous
	 * entry in place: only the IHL match and the result info change,
	 * so no memset here on purpose.
	 */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[TCAM_DATA_MASK(MVPP2_ETH_TYPE_LEN)] = 0x0;

	/* Match the IP version only; any IHL value is accepted here */
	mv_pp2x_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				       MVPP2_PRS_IPV4_HEAD,
				       MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				   MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = false;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				  MVPP2_PRS_RI_L3_PROTO_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Ethertype: IPv6 without options */
	tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
					  MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mv_pp2x_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mv_pp2x_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				   MVPP2_MAX_L3_ADDR_SIZE,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				   MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				    MVPP2_ETH_TYPE_LEN,
				    MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = false;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_L3_IP6,
				  MVPP2_PRS_RI_L3_PROTO_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				   MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even it's unknown L3 */
	mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				    MVPP2_ETH_TYPE_LEN,
				    MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_L2);
	hw->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	hw->prs_shadow[pe.index].finish = true;
	mv_pp2x_prs_shadow_ri_set(hw, pe.index, MVPP2_PRS_RI_L3_UN,
				  MVPP2_PRS_RI_L3_PROTO_MASK);
	mv_pp2x_prs_hw_write(hw, &pe);
	return 0;
}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mv_pp2x_prs_vlan_init(struct platform_device *pdev,
+				 struct mv_pp2x_hw *hw)
+{
+	struct mv_pp2x_prs_entry pe;
+	int err;
+
+	/* Shadow of which double-vlan pairs are in use.  devm_kcalloc()
+	 * takes (dev, n, size, flags): pass the element count first so
+	 * the conventional argument order is kept (the old swapped form
+	 * only worked because the product is the same).
+	 */
+	hw->prs_double_vlans = devm_kcalloc(&pdev->dev,
+					    MVPP2_PRS_DBL_VLANS_MAX,
+					    sizeof(bool), GFP_KERNEL);
+	if (!hw->prs_double_vlans)
+		return -ENOMEM;
+
+	/* Double VLAN: 0x8100, 0x88A8 */
+	err = mv_pp2x_prs_double_vlan_add(hw, ETH_P_8021Q, ETH_P_8021AD,
+					  MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Double VLAN: 0x8100, 0x8100 */
+	err = mv_pp2x_prs_double_vlan_add(hw, ETH_P_8021Q, ETH_P_8021Q,
+					  MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x88a8 */
+	err = mv_pp2x_prs_vlan_add(hw, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+				   MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x8100 */
+	err = mv_pp2x_prs_vlan_add(hw, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+				   MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Set default double vlan entry */
+	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_DBL;
+
+	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	/* Clear ai for next iterations */
+	mv_pp2x_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+				   MVPP2_PRS_RI_VLAN_MASK);
+
+	mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+				   MVPP2_PRS_DBL_VLAN_AI_BIT);
+	/* Unmask all ports */
+	mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_VLAN);
+	mv_pp2x_prs_hw_write(hw, &pe);
+
+	/* Set default vlan none entry */
+	memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+	mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_NONE;
+
+	mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+				   MVPP2_PRS_RI_VLAN_MASK);
+
+	/* Unmask all ports */
+	mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_VLAN);
+	mv_pp2x_prs_hw_write(hw, &pe);
+
+	return 0;
+}
+
+/* Set entries for PPPoE ethertype.
+ * Installs parser entries for IPv4 (with and without options), IPv6 and
+ * unknown L3 payloads carried over PPPoE.  Returns 0 on success or the
+ * negative value from mv_pp2x_prs_tcam_first_free() when no free TCAM
+ * entry is left.
+ */
+static int mv_pp2x_prs_pppoe_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+ int tid;
+
+ /* IPv4 over PPPoE with options */
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mv_pp2x_prs_match_etype(&pe, 0, PPP_IP);
+
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Skip eth_type + 4 bytes of IP header */
+ mv_pp2x_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_PPPOE);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* IPv4 over PPPoE without options */
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Deliberately reuse the previous entry (no memset): the PPP_IP
+  * etype match and sram setup are inherited; only the IHL byte match
+  * is added and the result info is rewritten below.
+  */
+ pe.index = tid;
+
+ mv_pp2x_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
+
+ /* Clear ri before updating */
+ pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_PPPOE);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* IPv6 over PPPoE */
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mv_pp2x_prs_match_etype(&pe, 0, PPP_IPV6);
+
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* Skip eth_type + 4 bytes of IPv6 header */
+ mv_pp2x_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_PPPOE);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* Non-IP over PPPoE */
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
+
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Set L3 offset even if it's unknown L3 */
+ mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_PPPOE);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv4.
+ * Installs TCP/UDP/IGMP protocol entries, broadcast/multicast cast
+ * entries and the default unknown-protocol/unicast entries.  Returns 0
+ * or a negative error from the helpers.
+ */
+static int mv_pp2x_prs_ip4_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+ int err;
+
+ /* Set entries for TCP, UDP and IGMP over IPv4 */
+ err = mv_pp2x_prs_ip4_proto(hw, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ if (err)
+ return err;
+
+ err = mv_pp2x_prs_ip4_proto(hw, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ if (err)
+ return err;
+
+ /* IGMP is tagged with the special-rx result bits instead of an L4
+  * protocol code (CPU_CODE_RX_SPEC / UDF3_RX_SPECIAL).
+  */
+ err = mv_pp2x_prs_ip4_proto(hw, IPPROTO_IGMP,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+
+ if (err)
+ return err;
+
+ /* IPv4 Broadcast */
+ err = mv_pp2x_prs_ip4_cast(hw, MVPP2_PRS_L3_BROAD_CAST);
+
+ if (err)
+ return err;
+
+ /* IPv4 Multicast */
+ err = mv_pp2x_prs_ip4_cast(hw, MVPP2_PRS_L3_MULTI_CAST);
+
+ if (err)
+ return err;
+
+ /* Default IPv4 entry for unknown protocols */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+ /* Set next lu to IPv4 */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L4 offset */
+ mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ /* Set the DIP ai bit for the next IPv4 lookup pass; this entry's
+  * own tcam matches ai == 0 (first pass) below.
+  */
+ mv_pp2x_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ mv_pp2x_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* Default IPv4 entry for unicast address */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+
+ /* This entry matches the second pass (DIP ai bit set) */
+ mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_hw_write(hw, &pe);
+ return 0;
+}
+
+/* Initialize entries for IPv6.
+ * Installs TCP/UDP/ICMPv6 protocol entries, the DS-Lite (IPIP) entry,
+ * multicast handling, a hop-limit-zero drop entry and the default
+ * unknown-protocol / unknown-ext / unicast entries.
+ */
+static int mv_pp2x_prs_ip6_init(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_prs_entry pe;
+ int tid, err;
+
+ /* Set entries for TCP, UDP and ICMP over IPv6 */
+ err = mv_pp2x_prs_ip6_proto(hw, IPPROTO_TCP,
+ MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ err = mv_pp2x_prs_ip6_proto(hw, IPPROTO_UDP,
+ MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (err)
+ return err;
+
+ /* ICMPv6 carries the special-rx result bits, like IGMP on IPv4 */
+ err = mv_pp2x_prs_ip6_proto(hw, IPPROTO_ICMPV6,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+ MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK |
+ MVPP2_PRS_RI_UDF3_MASK);
+ if (err)
+ return err;
+
+ /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
+ /* Result Info: UDF7=1, DS lite */
+ err = mv_pp2x_prs_ip6_proto(hw, IPPROTO_IPIP,
+ MVPP2_PRS_RI_UDF7_IP6_LITE,
+ MVPP2_PRS_RI_UDF7_MASK);
+ if (err)
+ return err;
+
+ /* IPv6 multicast */
+ err = mv_pp2x_prs_ip6_cast(hw, MVPP2_PRS_L3_MULTI_CAST);
+ if (err)
+ return err;
+
+ /* Entry for checking hop limit */
+ tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = tid;
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Hop limit byte == 0: mark unknown L3 and set the drop bit */
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+ MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_L3_PROTO_MASK |
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mv_pp2x_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+ mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Update shadow table and hw entry */
+ /* NOTE(review): the shadow lu is recorded as MVPP2_PRS_LU_IP4 for
+  * this IPv6 entry; looks like a copy-paste - confirm it is intended.
+  */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* Default IPv6 entry for unknown protocols */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+ /* Set L4 offset relatively to our current place */
+ mv_pp2x_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof(struct ipv6hdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ /* NOTE(review): LU_IP4 shadow mark on an IPv6 entry - see note above */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* Default IPv6 entry for unknown ext protocols */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+ /* Finished: go to flowid generation */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mv_pp2x_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+ MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ mv_pp2x_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_EXT_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ /* NOTE(review): LU_IP4 shadow mark on an IPv6 entry - see note above */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP4);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ /* Default IPv6 entry for unicast address */
+ memset(&pe, 0, sizeof(struct mv_pp2x_prs_entry));
+ mv_pp2x_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+ /* Finished: go to IPv6 again */
+ mv_pp2x_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+ mv_pp2x_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+ MVPP2_PRS_RI_L3_ADDR_MASK);
+ mv_pp2x_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+ MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Shift back to IPV6 NH */
+ mv_pp2x_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mv_pp2x_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+ /* Unmask all ports */
+ mv_pp2x_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mv_pp2x_prs_shadow_set(hw, pe.index, MVPP2_PRS_LU_IP6);
+ mv_pp2x_prs_hw_write(hw, &pe);
+
+ return 0;
+}
+
+/* Check whether the tcam entry's data bytes match the masked MAC DA */
+static bool mv_pp2x_prs_mac_range_equals(struct mv_pp2x_prs_entry *pe,
+					 const u8 *da, unsigned char *mask)
+{
+	unsigned char entry_byte, entry_mask;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mv_pp2x_prs_tcam_data_byte_get(pe, i, &entry_byte,
+					       &entry_mask);
+
+		/* Both the mask and the masked address byte must agree */
+		if (entry_mask != mask[i] ||
+		    (entry_mask & entry_byte) != (da[i] & mask[i]))
+			return false;
+	}
+
+	return true;
+}
+
+/* Find tcam entry with matched pair <MAC DA, port>.
+ * Returns a freshly allocated entry (caller frees) or NULL when no
+ * matching entry exists or the allocation fails.
+ */
+static struct mv_pp2x_prs_entry *
+mv_pp2x_prs_mac_da_range_find(struct mv_pp2x_hw *hw, int pmap, const u8 *da,
+			      unsigned char *mask, int udf_type)
+{
+	struct mv_pp2x_prs_entry *pe;
+	int tid;
+
+	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+
+	/* Go through all entries with MVPP2_PRS_LU_MAC */
+	for (tid = MVPP2_PE_MAC_RANGE_START;
+	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+		if (!hw->prs_shadow[tid].valid ||
+		    (hw->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+		    (hw->prs_shadow[tid].udf != udf_type))
+			continue;
+
+		pe->index = tid;
+		mv_pp2x_prs_hw_read(hw, pe);
+
+		/* Match on DA only; the caller updates the port map of the
+		 * entry it gets back, so @pmap is deliberately not compared
+		 * (the old unused entry_pmap read is dropped).
+		 */
+		if (mv_pp2x_prs_mac_range_equals(pe, da, mask))
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Update parser's mac da entry.
+ * Adds (@add == true) or removes the port from the TCAM entry matching
+ * @da, creating or invalidating the entry as needed.  Returns 0 on
+ * success or a negative errno.
+ */
+int mv_pp2x_prs_mac_da_accept(struct mv_pp2x_port *port, const u8 *da, bool add)
+{
+	struct mv_pp2x_prs_entry *pe;
+	unsigned int pmap, len, ri;
+	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	struct mv_pp2x_hw *hw = &port->priv->hw;
+	/* tid must be signed: mv_pp2x_prs_tcam_first_free() reports a full
+	 * range with a negative value, which an unsigned tid would hide.
+	 */
+	int tid;
+
+	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
+	pe = mv_pp2x_prs_mac_da_range_find(hw, (1 << port->id), da, mask,
+					   MVPP2_PRS_UDF_MAC_DEF);
+
+	/* No such entry */
+	if (!pe) {
+		if (!add)
+			return 0;
+
+		/* Create new TCAM entry */
+		/* Go through all entries from first to last in MAC range */
+		tid = mv_pp2x_prs_tcam_first_free(hw, MVPP2_PE_MAC_RANGE_START,
+						  MVPP2_PE_MAC_RANGE_END);
+		if (tid < 0)
+			return tid;
+
+		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+		mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+		pe->index = tid;
+
+		/* Mask all ports */
+		mv_pp2x_prs_tcam_port_map_set(pe, 0);
+	}
+
+	/* Update port mask */
+	mv_pp2x_prs_tcam_port_set(pe, port->id, add);
+
+	/* Invalidate the entry if no ports are left enabled */
+	pmap = mv_pp2x_prs_tcam_port_map_get(pe);
+	if (pmap == 0) {
+		if (add) {
+			kfree(pe);
+			return -EINVAL;
+		}
+		mv_pp2x_prs_hw_inv(hw, pe->index);
+		hw->prs_shadow[pe->index].valid = false;
+		kfree(pe);
+		return 0;
+	}
+
+	/* Continue - set next lookup */
+	mv_pp2x_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+
+	/* Set match on DA */
+	len = ETH_ALEN;
+	while (len--)
+		mv_pp2x_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+
+	/* Set result info bits */
+	if (is_broadcast_ether_addr(da))
+		ri = MVPP2_PRS_RI_L2_BCAST;
+	else if (is_multicast_ether_addr(da))
+		ri = MVPP2_PRS_RI_L2_MCAST;
+	else
+		ri = MVPP2_PRS_RI_L2_UCAST;
+	/* Set M2M */
+	if (ether_addr_equal(da, port->dev->dev_addr))
+		ri |= MVPP2_PRS_RI_MAC_ME_MASK;
+
+	mv_pp2x_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+				   MVPP2_PRS_RI_MAC_ME_MASK);
+	mv_pp2x_prs_shadow_ri_set(hw, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+				  MVPP2_PRS_RI_MAC_ME_MASK);
+
+	/* Shift to ethertype */
+	mv_pp2x_prs_sram_shift_set(pe, 2 * ETH_ALEN,
+				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Update shadow table and hw entry */
+	hw->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
+	mv_pp2x_prs_shadow_set(hw, pe->index, MVPP2_PRS_LU_MAC);
+	mv_pp2x_prs_hw_write(hw, pe);
+
+	kfree(pe);
+
+	return 0;
+}
+
+/* Replace the device MAC address and its parser entry, rolling the
+ * device address back if installing the new parser entry fails.
+ */
+int mv_pp2x_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+	struct mv_pp2x_port *port = netdev_priv(dev);
+	u8 prev_da[ETH_ALEN];
+	int ret;
+
+	/* Nothing to do when the address is unchanged */
+	if (ether_addr_equal(da, dev->dev_addr))
+		return 0;
+
+	/* Keep a copy for rollback */
+	ether_addr_copy(prev_da, dev->dev_addr);
+
+	/* Drop the parser entry of the old address */
+	ret = mv_pp2x_prs_mac_da_accept(port, dev->dev_addr, false);
+	if (ret)
+		return ret;
+
+	/* Install the new address on the device ... */
+	ether_addr_copy(dev->dev_addr, da);
+
+	/* ... and in the parser; undo the device change on failure */
+	ret = mv_pp2x_prs_mac_da_accept(port, da, true);
+	if (ret)
+		ether_addr_copy(dev->dev_addr, prev_da);
+
+	return ret;
+}
+
+/* Return true if @da is present in the device's unicast address list */
+static bool mv_pp2x_mac_in_uc_list(struct net_device *dev, const u8 *da)
+{
+	struct netdev_hw_addr *ha;
+
+	if (!netdev_uc_count(dev))
+		return false;
+
+	netdev_for_each_uc_addr(ha, dev) {
+		/* Compare against the list entry, not dev->dev_addr -
+		 * comparing the device address made the loop useless.
+		 */
+		if (ether_addr_equal(da, ha->addr))
+			return true;
+	}
+
+	return false;
+}
+
+/* Return true if @da is present in the device's multicast address list */
+static bool mv_pp2x_mac_in_mc_list(struct net_device *dev, const u8 *da)
+{
+	struct netdev_hw_addr *ha;
+
+	if (!netdev_mc_count(dev))
+		return false;
+
+	netdev_for_each_mc_addr(ha, dev) {
+		/* Compare against the list entry, not dev->dev_addr -
+		 * comparing the device address made the loop useless.
+		 */
+		if (ether_addr_equal(da, ha->addr))
+			return true;
+	}
+
+	return false;
+}
+
+/* Delete port's uc/mc/bc simple (not range) entries with options.
+ * @l2_cast selects which cast class is purged; with op ==
+ * MVPP2_DEL_MAC_NOT_IN_LIST, addresses still present in the device's
+ * uc/mc lists are kept.  The port's own address (M2M) and, in MC mode,
+ * the broadcast address are never removed.
+ */
+void mv_pp2x_prs_mac_entry_del(struct mv_pp2x_port *port,
+ enum mv_pp2x_l2_cast l2_cast,
+ enum mv_pp2x_mac_del_option op)
+{
+ struct mv_pp2x_prs_entry pe;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+ int index, tid;
+
+ for (tid = MVPP2_PE_MAC_RANGE_START;
+ tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
+ unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+ if (!hw->prs_shadow[tid].valid ||
+ (hw->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+ (hw->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+ continue;
+
+ /* Only simple mac entries */
+ pe.index = tid;
+ mv_pp2x_prs_hw_read(hw, &pe);
+
+ /* Read mac addr from entry */
+ for (index = 0; index < ETH_ALEN; index++)
+ mv_pp2x_prs_tcam_data_byte_get(&pe, index, &da[index],
+ &da_mask[index]);
+ switch (l2_cast) {
+ case MVPP2_PRS_MAC_UC:
+ /* Do not delete M2M entry */
+ if (is_unicast_ether_addr(da) &&
+ !ether_addr_equal(da, port->dev->dev_addr)) {
+ if (op == MVPP2_DEL_MAC_NOT_IN_LIST &&
+ mv_pp2x_mac_in_uc_list(port->dev, da))
+ continue;
+ /* Delete this entry */
+ mv_pp2x_prs_mac_da_accept(port, da, false);
+ }
+ break;
+ case MVPP2_PRS_MAC_MC:
+ if (is_multicast_ether_addr(da) &&
+ !is_broadcast_ether_addr(da)) {
+ if (op == MVPP2_DEL_MAC_NOT_IN_LIST &&
+ mv_pp2x_mac_in_mc_list(port->dev, da))
+ continue;
+ /* Delete this entry */
+ mv_pp2x_prs_mac_da_accept(port, da, false);
+ }
+ break;
+ case MVPP2_PRS_MAC_BC:
+ if (is_broadcast_ether_addr(da))
+ /* Delete this entry */
+ mv_pp2x_prs_mac_da_accept(port, da, false);
+ break;
+ }
+ }
+}
+
+/* Configure the DSA/EDSA tagging mode for @port.
+ * The port is added to the entries of the selected tag type before it is
+ * removed from the other type; MH/NONE removes the port from both.
+ * Unknown but in-range types fall through to success; out-of-range types
+ * return -EINVAL.
+ */
+int mv_pp2x_prs_tag_mode_set(struct mv_pp2x_hw *hw, int port, int type)
+{
+ switch (type) {
+ case MVPP2_TAG_TYPE_EDSA:
+ /* Add port to EDSA entries */
+ mv_pp2x_prs_dsa_tag_set(hw, port, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ /* Remove port from DSA entries */
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ break;
+
+ case MVPP2_TAG_TYPE_DSA:
+ /* Add port to DSA entries */
+ mv_pp2x_prs_dsa_tag_set(hw, port, true,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, true,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ /* Remove port from EDSA entries */
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+
+ case MVPP2_TAG_TYPE_MH:
+ case MVPP2_TAG_TYPE_NONE:
+ /* Remove port form EDSA and DSA entries */
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ mv_pp2x_prs_dsa_tag_set(hw, port, false,
+ MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+
+ default:
+ if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set prs flow for the port.
+ * Finds (or allocates, searching the free TCAM range from the top) the
+ * default flow entry keyed by port->id and limits its port map to this
+ * port only.  Returns 0, -ENOMEM, or the negative value from the TCAM
+ * search.
+ */
+int mv_pp2x_prs_def_flow(struct mv_pp2x_port *port)
+{
+ struct mv_pp2x_prs_entry *pe;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+ int tid;
+
+ pe = mv_pp2x_prs_flow_find(hw, port->id, 0, 0);
+
+ /* Such entry not exist */
+ if (!pe) {
+ /* Go through the all entires from last to first */
+ tid = mv_pp2x_prs_tcam_first_free(hw,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+ pe->index = tid;
+
+ /* Set flow ID*/
+ mv_pp2x_prs_sram_ai_update(pe, port->id,
+ MVPP2_PRS_FLOW_ID_MASK);
+ mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe->index, MVPP2_PRS_LU_FLOWS);
+ }
+
+ mv_pp2x_prs_tcam_port_map_set(pe, (1 << port->id));
+ mv_pp2x_prs_hw_write(hw, pe);
+ kfree(pe);
+
+ return 0;
+}
+
+/* Set prs dedicated flow for the port.
+ * One TCAM entry is shared per <flow_id, res, res_mask> tuple: a freshly
+ * created entry maps only this port, while an existing one keeps its
+ * current port map and has this port OR-ed in.
+ */
+int mv_pp2x_prs_flow_id_gen(struct mv_pp2x_port *port, u32 flow_id,
+ u32 res, u32 res_mask)
+{
+ struct mv_pp2x_prs_entry *pe;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+ int tid;
+ unsigned int pmap = 0;
+
+ pe = mv_pp2x_prs_flow_find(hw, flow_id, res, res_mask);
+
+ /* Such entry not exist */
+ if (!pe) {
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ /* Go through the all entires from last to first */
+ tid = mv_pp2x_prs_tcam_first_free(hw,
+ MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0) {
+ kfree(pe);
+ return tid;
+ }
+
+ mv_pp2x_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+ pe->index = tid;
+
+ mv_pp2x_prs_sram_ai_update(pe, flow_id, MVPP2_PRS_FLOW_ID_MASK);
+ mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table */
+ mv_pp2x_prs_shadow_set(hw, pe->index, MVPP2_PRS_LU_FLOWS);
+
+ /*update result data and mask*/
+ mv_pp2x_prs_tcam_data_dword_set(pe, 0, res, res_mask);
+ } else {
+ pmap = mv_pp2x_prs_tcam_port_map_get(pe);
+ }
+
+ mv_pp2x_prs_tcam_port_map_set(pe, (1 << port->id) | pmap);
+ mv_pp2x_prs_hw_write(hw, pe);
+ kfree(pe);
+
+ return 0;
+}
+
+/* Install every dedicated flow entry from the static flow-id table for
+ * this port; stops and returns the first error encountered.
+ */
+int mv_pp2x_prs_flow_set(struct mv_pp2x_port *port)
+{
+	int i, err = 0;
+
+	for (i = 0; i < MVPP2_PRS_FL_TCAM_NUM; i++) {
+		err = mv_pp2x_prs_flow_id_gen(port,
+			mv_pp2x_prs_flow_id_array[i].flow_id,
+			mv_pp2x_prs_flow_id_array[i].prs_result.ri,
+			mv_pp2x_prs_flow_id_array[i].prs_result.ri_mask);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+/* Derive the attribute word for @flow_id from its parser result info
+ * and cache it in mv_pp2x_prs_flow_id_attr_tbl.
+ */
+static void mv_pp2x_prs_flow_id_attr_set(int flow_id, int ri, int ri_mask)
+{
+	int attr = MVPP2_PRS_FL_ATTR_VLAN_BIT;
+	int l3 = ri & MVPP2_PRS_RI_L3_PROTO_MASK;
+	int l4 = ri & MVPP2_PRS_RI_L4_PROTO_MASK;
+
+	/* VLAN attribute is on by default; drop it for untagged flows */
+	if ((ri_mask & MVPP2_PRS_RI_VLAN_MASK) &&
+	    (ri & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_NONE)
+		attr &= ~MVPP2_PRS_FL_ATTR_VLAN_BIT;
+
+	/* L3 protocol field values are mutually exclusive */
+	if (l3 == MVPP2_PRS_RI_L3_IP4 || l3 == MVPP2_PRS_RI_L3_IP4_OPT ||
+	    l3 == MVPP2_PRS_RI_L3_IP4_OTHER)
+		attr |= MVPP2_PRS_FL_ATTR_IP4_BIT;
+	else if (l3 == MVPP2_PRS_RI_L3_IP6 || l3 == MVPP2_PRS_RI_L3_IP6_EXT)
+		attr |= MVPP2_PRS_FL_ATTR_IP6_BIT;
+	else if (l3 == MVPP2_PRS_RI_L3_ARP)
+		attr |= MVPP2_PRS_FL_ATTR_ARP_BIT;
+
+	if (ri & MVPP2_PRS_RI_IP_FRAG_MASK)
+		attr |= MVPP2_PRS_FL_ATTR_FRAG_BIT;
+
+	/* L4 protocol field values are mutually exclusive as well */
+	if (l4 == MVPP2_PRS_RI_L4_TCP)
+		attr |= MVPP2_PRS_FL_ATTR_TCP_BIT;
+	else if (l4 == MVPP2_PRS_RI_L4_UDP)
+		attr |= MVPP2_PRS_FL_ATTR_UDP_BIT;
+
+	mv_pp2x_prs_flow_id_attr_tbl[flow_id] = attr;
+}
+
+/* Init lookup id attribute array: derive and cache the attribute word
+ * for every entry of the static flow-id table.
+ */
+void mv_pp2x_prs_flow_id_attr_init(void)
+{
+	int i;
+
+	for (i = 0; i < MVPP2_PRS_FL_TCAM_NUM; i++)
+		mv_pp2x_prs_flow_id_attr_set(
+			mv_pp2x_prs_flow_id_array[i].flow_id,
+			mv_pp2x_prs_flow_id_array[i].prs_result.ri,
+			mv_pp2x_prs_flow_id_array[i].prs_result.ri_mask);
+}
+
+/* Return the cached flow attribute word for @flow_id */
+int mv_pp2x_prs_flow_id_attr_get(int flow_id)
+{
+ return mv_pp2x_prs_flow_id_attr_tbl[flow_id];
+}
+
+/* Classifier configuration routines */
+
+/* Update classification flow table registers.
+ * The flow index register latches @fe->index; the three data registers
+ * are then written back-to-back.
+ */
+void mv_pp2x_cls_flow_write(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_cls_flow_entry *fe)
+{
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+}
+EXPORT_SYMBOL(mv_pp2x_cls_flow_write);
+
+/* Read the flow table entry at @index into @fe */
+static void mv_pp2x_cls_flow_read(struct mv_pp2x_hw *hw, int index,
+ struct mv_pp2x_cls_flow_entry *fe)
+{
+ fe->index = index;
+ /*write index*/
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_INDEX_REG, index);
+
+ fe->data[0] = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL2_REG);
+}
+
+/* Update classification lookup table register.
+ * The index word combines the way bit with the lookup id.
+ */
+static void mv_pp2x_cls_lookup_write(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_cls_lookup_entry *le)
+{
+ u32 val;
+
+ val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+ mv_pp2x_write(hw, MVPP2_CLS_LKP_INDEX_REG, val);
+ mv_pp2x_write(hw, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/* Read the lookup entry for <@lkpid, @way> into @le */
+void mv_pp2x_cls_lookup_read(struct mv_pp2x_hw *hw, int lkpid, int way,
+ struct mv_pp2x_cls_lookup_entry *le)
+{
+ unsigned int val = 0;
+
+ /* write index reg */
+ val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+ mv_pp2x_write(hw, MVPP2_CLS_LKP_INDEX_REG, val);
+ le->way = way;
+ le->lkpid = lkpid;
+ le->data = mv_pp2x_read(hw, MVPP2_CLS_LKP_TBL_REG);
+}
+
+/* Operations on flow entry */
+
+/* Set the number of header-extraction fields (HEKs) in @fe.
+ * Returns MV_ERROR on a NULL pointer or an out-of-range count.
+ */
+int mv_pp2x_cls_sw_flow_hek_num_set(struct mv_pp2x_cls_flow_entry *fe,
+ int num_of_fields)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(num_of_fields, 0,
+ MVPP2_CLS_FLOWS_TBL_FIELDS_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->data[1] &= ~MVPP2_FLOW_FIELDS_NUM_MASK;
+ fe->data[1] |= (num_of_fields << MVPP2_FLOW_FIELDS_NUM);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_hek_num_set);
+
+/* Set HEK field @field_index of @fe to @field_id.
+ * Fails (-1) when the index exceeds the currently configured number of
+ * fields, so the count must be set first.
+ */
+int mv_pp2x_cls_sw_flow_hek_set(struct mv_pp2x_cls_flow_entry *fe,
+ int field_index, int field_id)
+{
+ int num_of_fields;
+
+ /* get current num_of_fields */
+ num_of_fields = ((fe->data[1] &
+ MVPP2_FLOW_FIELDS_NUM_MASK) >> MVPP2_FLOW_FIELDS_NUM);
+
+ if (num_of_fields < (field_index + 1)) {
+ pr_debug("%s:num of heks=%d ,idx(%d) out of range\n",
+ __func__, num_of_fields, field_index);
+ return -1;
+ }
+
+ fe->data[2] &= ~MVPP2_FLOW_FIELD_MASK(field_index);
+ fe->data[2] |= (field_id << MVPP2_FLOW_FIELD_ID(field_index));
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_hek_set);
+
+/* Select the classification engine and "last" flag for @fe; also forces
+ * port-id based flow selection (MVPP2_FLOW_PORT_ID_SEL_MASK).
+ */
+static void mv_pp2x_cls_sw_flow_eng_set(struct mv_pp2x_cls_flow_entry *fe,
+ int engine, int is_last)
+{
+ fe->data[0] &= ~MVPP2_FLOW_LAST_MASK;
+ fe->data[0] &= ~MVPP2_FLOW_ENGINE_MASK;
+
+ fe->data[0] |= is_last;
+ fe->data[0] |= (engine << MVPP2_FLOW_ENGINE);
+ fe->data[0] |= MVPP2_FLOW_PORT_ID_SEL_MASK;
+}
+
+/* To init flow table according to different flow.
+ * Writes one classifier CoS flow entry for @lkpid/@cos_type at the next
+ * free index and records the index in the shadow table.
+ */
+static void mv_pp2x_cls_flow_cos(struct mv_pp2x_hw *hw,
+				 struct mv_pp2x_cls_flow_entry *fe,
+				 int lkpid, int cos_type)
+{
+	int hek_num, field_id, lkp_type, is_last;
+	int entry_idx = hw->cls_shadow->flow_free_start;
+
+	switch (cos_type) {
+	case MVPP2_COS_TYPE_VLAN:
+		lkp_type = MVPP2_CLS_LKP_VLAN_PRI;
+		break;
+	case MVPP2_COS_TYPE_DSCP:
+		lkp_type = MVPP2_CLS_LKP_DSCP_PRI;
+		break;
+	default:
+		lkp_type = MVPP2_CLS_LKP_DEFAULT;
+		break;
+	}
+
+	/* CoS flows carry no header-extraction fields today; initialize
+	 * field_id so the guarded hek path below never reads an
+	 * indeterminate value should hek_num ever become non-zero.
+	 */
+	hek_num = 0;
+	field_id = 0;
+
+	if ((lkpid == MVPP2_PRS_FL_NON_IP_UNTAG &&
+	     cos_type == MVPP2_COS_TYPE_DEF) ||
+	    (lkpid == MVPP2_PRS_FL_NON_IP_TAG &&
+	     cos_type == MVPP2_COS_TYPE_VLAN))
+		is_last = 1;
+	else
+		is_last = 0;
+
+	/* Set SW */
+	memset(fe, 0, sizeof(struct mv_pp2x_cls_flow_entry));
+	mv_pp2x_cls_sw_flow_hek_num_set(fe, hek_num);
+	if (hek_num)
+		mv_pp2x_cls_sw_flow_hek_set(fe, 0, field_id);
+	mv_pp2x_cls_sw_flow_eng_set(fe, MVPP2_CLS_ENGINE_C2, is_last);
+	mv_pp2x_cls_sw_flow_extra_set(fe, lkp_type, MVPP2_CLS_FL_COS_PRI);
+	fe->index = entry_idx;
+
+	/* Write HW */
+	mv_pp2x_cls_flow_write(hw, fe);
+
+	/* Update Shadow */
+	if (cos_type == MVPP2_COS_TYPE_DEF)
+		hw->cls_shadow->flow_info[lkpid -
+			MVPP2_PRS_FL_START].flow_entry_dflt = entry_idx;
+	else if (cos_type == MVPP2_COS_TYPE_VLAN)
+		hw->cls_shadow->flow_info[lkpid -
+			MVPP2_PRS_FL_START].flow_entry_vlan = entry_idx;
+	else
+		hw->cls_shadow->flow_info[lkpid -
+			MVPP2_PRS_FL_START].flow_entry_dscp = entry_idx;
+
+	/* Update first available flow entry */
+	hw->cls_shadow->flow_free_start++;
+}
+
+/* Init flow entry for RSS hash in PP22.
+ * 2T mode hashes on IP source/destination only (engine C3HA); otherwise
+ * four fields including the L4 ports are used (engine C3HB).
+ */
+static void mv_pp2x_cls_flow_rss_hash(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_cls_flow_entry *fe,
+ int lkpid, int rss_mode)
+{
+ int field_id[4] = {0};
+ int entry_idx = hw->cls_shadow->flow_free_start;
+ int lkpid_attr = mv_pp2x_prs_flow_id_attr_get(lkpid);
+
+ /* IP4 packet */
+ if (lkpid_attr & MVPP2_PRS_FL_ATTR_IP4_BIT) {
+ field_id[0] = MVPP2_CLS_FIELD_IP4SA;
+ field_id[1] = MVPP2_CLS_FIELD_IP4DA;
+ } else if (lkpid_attr & MVPP2_PRS_FL_ATTR_IP6_BIT) {
+ field_id[0] = MVPP2_CLS_FIELD_IP6SA;
+ field_id[1] = MVPP2_CLS_FIELD_IP6DA;
+ }
+ /* L4 port */
+ field_id[2] = MVPP2_CLS_FIELD_L4SIP;
+ field_id[3] = MVPP2_CLS_FIELD_L4DIP;
+
+ /* Set SW */
+ memset(fe, 0, sizeof(struct mv_pp2x_cls_flow_entry));
+ if (rss_mode == MVPP2_RSS_HASH_2T) {
+ mv_pp2x_cls_sw_flow_hek_num_set(fe, 2);
+ mv_pp2x_cls_sw_flow_eng_set(fe, MVPP2_CLS_ENGINE_C3HA, 1);
+ mv_pp2x_cls_sw_flow_hek_set(fe, 0, field_id[0]);
+ mv_pp2x_cls_sw_flow_hek_set(fe, 1, field_id[1]);
+ } else {
+ mv_pp2x_cls_sw_flow_hek_num_set(fe, 4);
+ mv_pp2x_cls_sw_flow_hek_set(fe, 0, field_id[0]);
+ mv_pp2x_cls_sw_flow_hek_set(fe, 1, field_id[1]);
+ mv_pp2x_cls_sw_flow_hek_set(fe, 2, field_id[2]);
+ mv_pp2x_cls_sw_flow_hek_set(fe, 3, field_id[3]);
+ mv_pp2x_cls_sw_flow_eng_set(fe, MVPP2_CLS_ENGINE_C3HB, 1);
+ }
+ mv_pp2x_cls_sw_flow_extra_set(fe,
+ MVPP2_CLS_LKP_HASH, MVPP2_CLS_FL_RSS_PRI);
+ fe->index = entry_idx;
+
+ /* Update last for UDP NF flow */
+ /* NOTE(review): the first RSS entry of a UDP non-fragment flow gets
+  * its "last" flag cleared - presumably so the second RSS entry added
+  * later terminates the chain; confirm against the PP22 spec.
+  */
+ if ((lkpid_attr & MVPP2_PRS_FL_ATTR_UDP_BIT) &&
+ !(lkpid_attr & MVPP2_PRS_FL_ATTR_FRAG_BIT)) {
+ if (!hw->cls_shadow->flow_info[lkpid -
+ MVPP2_PRS_FL_START].flow_entry_rss1) {
+ if (rss_mode == MVPP2_RSS_HASH_2T)
+ mv_pp2x_cls_sw_flow_eng_set(fe,
+ MVPP2_CLS_ENGINE_C3HA, 0);
+ else
+ mv_pp2x_cls_sw_flow_eng_set(fe,
+ MVPP2_CLS_ENGINE_C3HB, 0);
+ }
+ }
+
+ /* Write HW */
+ mv_pp2x_cls_flow_write(hw, fe);
+
+ /* Update Shadow */
+ if (hw->cls_shadow->flow_info[lkpid -
+ MVPP2_PRS_FL_START].flow_entry_rss1 == 0)
+ hw->cls_shadow->flow_info[lkpid -
+ MVPP2_PRS_FL_START].flow_entry_rss1 = entry_idx;
+ else
+ hw->cls_shadow->flow_info[lkpid -
+ MVPP2_PRS_FL_START].flow_entry_rss2 = entry_idx;
+
+ /* Update first available flow entry */
+ hw->cls_shadow->flow_free_start++;
+}
+
/* Init cls flow table according to different flow id.
 * For every lookup id, installs the CoS rules (default/VLAN/DSCP) that
 * apply to its traffic class, plus RSS hash rule(s) for IP flows.
 */
void mv_pp2x_cls_flow_tbl_config(struct mv_pp2x_hw *hw)
{
	int lkpid, rss_mode, lkpid_attr;
	struct mv_pp2x_cls_flow_entry fe;

	for (lkpid = MVPP2_PRS_FL_START; lkpid < MVPP2_PRS_FL_LAST; lkpid++) {
		/* Get lookup id attribute */
		lkpid_attr = mv_pp2x_prs_flow_id_attr_get(lkpid);
		/* Default rss hash is based on 5T */
		rss_mode = MVPP2_RSS_HASH_5T;
		/* For frag packets or non-TCP&UDP, rss must be based on 2T */
		if ((lkpid_attr & MVPP2_PRS_FL_ATTR_FRAG_BIT) ||
		    !(lkpid_attr & (MVPP2_PRS_FL_ATTR_TCP_BIT |
				    MVPP2_PRS_FL_ATTR_UDP_BIT)))
			rss_mode = MVPP2_RSS_HASH_2T;

		/* For untagged IP packets, only need default
		 * rule and dscp rule
		 */
		if ((lkpid_attr & (MVPP2_PRS_FL_ATTR_IP4_BIT |
				   MVPP2_PRS_FL_ATTR_IP6_BIT)) &&
		    (!(lkpid_attr & MVPP2_PRS_FL_ATTR_VLAN_BIT))) {
			/* Default rule */
			mv_pp2x_cls_flow_cos(hw, &fe, lkpid,
					     MVPP2_COS_TYPE_DEF);
			/* DSCP rule */
			mv_pp2x_cls_flow_cos(hw, &fe, lkpid,
					     MVPP2_COS_TYPE_DSCP);
			/* RSS hash rule */
			if ((!(lkpid_attr & MVPP2_PRS_FL_ATTR_FRAG_BIT)) &&
			    (lkpid_attr & MVPP2_PRS_FL_ATTR_UDP_BIT)) {
				/* Non-fragmented UDP gets both 2T and 5T
				 * entries so the rss mode can be switched
				 * at runtime.
				 */
				mv_pp2x_cls_flow_rss_hash(hw, &fe, lkpid,
							  MVPP2_RSS_HASH_2T);
				mv_pp2x_cls_flow_rss_hash(hw, &fe, lkpid,
							  MVPP2_RSS_HASH_5T);
			} else {
				mv_pp2x_cls_flow_rss_hash(hw, &fe, lkpid,
							  rss_mode);
			}
		}

		/* For tagged IP packets, only need vlan rule and dscp rule */
		if ((lkpid_attr & (MVPP2_PRS_FL_ATTR_IP4_BIT |
				   MVPP2_PRS_FL_ATTR_IP6_BIT)) &&
		    (lkpid_attr & MVPP2_PRS_FL_ATTR_VLAN_BIT)) {
			/* VLAN rule */
			mv_pp2x_cls_flow_cos(hw, &fe, lkpid,
					     MVPP2_COS_TYPE_VLAN);
			/* DSCP rule */
			mv_pp2x_cls_flow_cos(hw, &fe, lkpid,
					     MVPP2_COS_TYPE_DSCP);
			/* RSS hash rule */
			if ((!(lkpid_attr & MVPP2_PRS_FL_ATTR_FRAG_BIT)) &&
			    (lkpid_attr & MVPP2_PRS_FL_ATTR_UDP_BIT)) {
				/* RSS hash rules for UDP rss mode update */
				mv_pp2x_cls_flow_rss_hash(hw, &fe, lkpid,
							  MVPP2_RSS_HASH_2T);
				mv_pp2x_cls_flow_rss_hash(hw, &fe, lkpid,
							  MVPP2_RSS_HASH_5T);
			} else {
				mv_pp2x_cls_flow_rss_hash(hw, &fe, lkpid,
							  rss_mode);
			}
		}

		/* For non-IP packets, only need default rule if untagged,
		 * vlan rule also needed if tagged
		 */
		if (!(lkpid_attr & (MVPP2_PRS_FL_ATTR_IP4_BIT |
				    MVPP2_PRS_FL_ATTR_IP6_BIT))) {
			/* Default rule */
			mv_pp2x_cls_flow_cos(hw, &fe, lkpid,
					     MVPP2_COS_TYPE_DEF);
			/* VLAN rule if tagged */
			if (lkpid_attr & MVPP2_PRS_FL_ATTR_VLAN_BIT)
				mv_pp2x_cls_flow_cos(hw, &fe, lkpid,
						     MVPP2_COS_TYPE_VLAN);
		}
	}
}
+
+/* Update the flow index for flow of lkpid */
+void mv_pp2x_cls_lkp_flow_set(struct mv_pp2x_hw *hw, int lkpid, int way,
+ int flow_idx)
+{
+ struct mv_pp2x_cls_lookup_entry le;
+
+ mv_pp2x_cls_lookup_read(hw, lkpid, way, &le);
+ mv_pp2x_cls_sw_lkp_flow_set(&le, flow_idx);
+ mv_pp2x_cls_lookup_write(hw, &le);
+}
+
+int mv_pp2x_cls_lkp_port_way_set(struct mv_pp2x_hw *hw, int port, int way)
+{
+ unsigned int val;
+
+ if (mv_pp2x_range_validate(port, 0, MVPP2_MAX_PORTS - 1) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(way, 0, ONE_BIT_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ val = mv_pp2x_read(hw, MVPP2_CLS_PORT_WAY_REG);
+ if (way == 1)
+ val |= MVPP2_CLS_PORT_WAY_MASK(port);
+ else
+ val &= ~MVPP2_CLS_PORT_WAY_MASK(port);
+ mv_pp2x_write(hw, MVPP2_CLS_PORT_WAY_REG, val);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_lkp_port_way_set);
+
+int mv_pp2x_cls_hw_udf_set(struct mv_pp2x_hw *hw, int udf_no, int offs_id,
+ int offs_bits, int size_bits)
+{
+ unsigned int reg_val;
+
+ if (mv_pp2x_range_validate(offs_id, 0,
+ MVPP2_CLS_UDF_OFFSET_ID_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(offs_bits, 0,
+ MVPP2_CLS_UDF_REL_OFFSET_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(size_bits, 0,
+ MVPP2_CLS_UDF_SIZE_MASK) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(udf_no, 0,
+ MVPP2_CLS_UDF_REGS_NUM - 1) == MV_ERROR)
+ return MV_ERROR;
+
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS_UDF_REG(udf_no));
+ reg_val &= ~MVPP2_CLS_UDF_OFFSET_ID_MASK;
+ reg_val &= ~MVPP2_CLS_UDF_REL_OFFSET_MASK;
+ reg_val &= ~MVPP2_CLS_UDF_SIZE_MASK;
+
+ reg_val |= (offs_id << MVPP2_CLS_UDF_OFFSET_ID_OFFS);
+ reg_val |= (offs_bits << MVPP2_CLS_UDF_REL_OFFSET_OFFS);
+ reg_val |= (size_bits << MVPP2_CLS_UDF_SIZE_OFFS);
+
+ mv_pp2x_write(hw, MVPP2_CLS_UDF_REG(udf_no), reg_val);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_hw_udf_set);
+
/* Init lookup decoding table with lookup id.
 * For every lookup id, points the lookup entry (both ways) at the
 * lowest-indexed CoS flow entry recorded in the shadow.
 */
void mv_pp2x_cls_lookup_tbl_config(struct mv_pp2x_hw *hw)
{
	int index, flow_idx;
	int data[MVPP2_LKP_PTR_NUM];
	struct mv_pp2x_cls_lookup_entry le;
	struct mv_pp2x_cls_flow_info *flow_info;

	memset(&le, 0, sizeof(struct mv_pp2x_cls_lookup_entry));
	/* Enable classifier engine */
	mv_pp2x_cls_sw_lkp_en_set(&le, 1);

	for (index = 0; index < (MVPP2_PRS_FL_LAST - MVPP2_PRS_FL_START);
	     index++) {
		int i, j;

		flow_info = &hw->cls_shadow->flow_info[index];
		/* Init data[] as invalid value */
		for (i = 0; i < MVPP2_LKP_PTR_NUM; i++)
			data[i] = MVPP2_FLOW_TBL_SIZE;
		le.lkpid = hw->cls_shadow->flow_info[index].lkpid;
		/* Find the min non-zero one in flow_entry_dflt,
		 * flow_entry_vlan, and flow_entry_dscp
		 */
		j = 0;
		if (flow_info->flow_entry_dflt)
			data[j++] = flow_info->flow_entry_dflt;
		if (flow_info->flow_entry_vlan)
			data[j++] = flow_info->flow_entry_vlan;
		if (flow_info->flow_entry_dscp)
			data[j++] = flow_info->flow_entry_dscp;
		/* Get the lookup table entry pointer.
		 * NOTE(review): if none of the three entries is set (j == 0)
		 * the invalid marker MVPP2_FLOW_TBL_SIZE is programmed as
		 * the flow pointer -- confirm this cannot occur once
		 * mv_pp2x_cls_flow_tbl_config() has run.
		 */
		flow_idx = data[0];
		for (i = 0; i < j; i++) {
			if (flow_idx > data[i])
				flow_idx = data[i];
		}

		/* Set flow pointer index */
		mv_pp2x_cls_sw_lkp_flow_set(&le, flow_idx);

		/* Set initial rx queue */
		mv_pp2x_cls_sw_lkp_rxq_set(&le, 0x0);

		le.way = 0;

		/* Update lookup ID table entry */
		mv_pp2x_cls_lookup_write(hw, &le);

		le.way = 1;

		/* Update lookup ID table entry */
		mv_pp2x_cls_lookup_write(hw, &le);
	}
}
+
+/* Classifier default initialization */
+int mv_pp2x_cls_init(struct platform_device *pdev, struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_cls_lookup_entry le;
+ struct mv_pp2x_cls_flow_entry fe;
+ int index;
+
+ /* Enable classifier */
+ mv_pp2x_write(hw, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+ /* Clear classifier flow table */
+ memset(&fe.data, 0, sizeof(fe.data));
+ for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+ fe.index = index;
+ mv_pp2x_cls_flow_write(hw, &fe);
+ }
+
+ /* Clear classifier lookup table */
+ le.data = 0;
+ for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+ le.lkpid = index;
+ le.way = 0;
+ mv_pp2x_cls_lookup_write(hw, &le);
+
+ le.way = 1;
+ mv_pp2x_cls_lookup_write(hw, &le);
+ }
+
+ hw->cls_shadow = devm_kcalloc(&pdev->dev, 1,
+ sizeof(struct mv_pp2x_cls_shadow),
+ GFP_KERNEL);
+ if (!hw->cls_shadow)
+ return -ENOMEM;
+
+ hw->cls_shadow->flow_info = devm_kcalloc(&pdev->dev,
+ (MVPP2_PRS_FL_LAST - MVPP2_PRS_FL_START),
+ sizeof(struct mv_pp2x_cls_flow_info),
+ GFP_KERNEL);
+ if (!hw->cls_shadow->flow_info)
+ return -ENOMEM;
+
+ /* Start from entry 1 to allocate flow table */
+ hw->cls_shadow->flow_free_start = 1;
+ for (index = 0; index < (MVPP2_PRS_FL_LAST - MVPP2_PRS_FL_START);
+ index++)
+ hw->cls_shadow->flow_info[index].lkpid = index +
+ MVPP2_PRS_FL_START;
+
+ /* Init flow table */
+ mv_pp2x_cls_flow_tbl_config(hw);
+
+ /* Init lookup table */
+ mv_pp2x_cls_lookup_tbl_config(hw);
+
+ return 0;
+}
+
+void mv_pp2x_cls_port_config(struct mv_pp2x_port *port)
+{
+ struct mv_pp2x_cls_lookup_entry le;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+ u32 val;
+
+ /* Set way for the port */
+ val = mv_pp2x_read(hw, MVPP2_CLS_PORT_WAY_REG);
+ val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+ mv_pp2x_write(hw, MVPP2_CLS_PORT_WAY_REG, val);
+
+ /* Pick the entry to be accessed in lookup ID decoding table
+ * according to the way and lkpid.
+ */
+ le.lkpid = port->id;
+ le.way = 0;
+ le.data = 0;
+
+ /* Set initial CPU queue for receiving packets */
+ le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+ le.data |= port->first_rxq;
+
+ /* Disable classification engines */
+ le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ /* Update lookup ID table entry */
+ mv_pp2x_cls_lookup_write(hw, &le);
+}
+
+/* Set CPU queue number for oversize packets */
+void mv_pp2x_cls_oversize_rxq_set(struct mv_pp2x_port *port)
+{
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ mv_pp2x_write(hw, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
+ port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+}
+
/* Read the PP21 MAC address out of the GMAC/LMS source-address registers
 * and unpack it most-significant byte first into addr[0..5].
 */
void mv_pp21_get_mac_address(struct mv_pp2x_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	/* NOTE(review): the (port->id << MVPP2_GMAC_SA_LOW_OFFS) term is
	 * added to the VALUE read from GMAC_CTRL_1, not to the register
	 * address, which perturbs the byte extracted for addr[5] below --
	 * confirm against the PP21 GMAC SA_LOW register layout.
	 */
	mac_addr_l = readl(port->priv->hw.base + MVPP2_GMAC_CTRL_1_REG) +
		(port->id << MVPP2_GMAC_SA_LOW_OFFS);
	mac_addr_m = readl(port->priv->hw.lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->hw.lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
+
+void mv_pp2x_cause_error(struct net_device *dev, int cause)
+{
+ if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+ netdev_err(dev, "FCS error\n");
+ if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+ netdev_err(dev, "rx fifo overrun error\n");
+ if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+ netdev_err(dev, "tx fifo underrun error\n");
+}
+
+/* Display more error info */
+void mv_pp2x_rx_error(struct mv_pp2x_port *port,
+ struct mv_pp2x_rx_desc *rx_desc)
+{
+ u32 status = rx_desc->status;
+
+ switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+ case MVPP2_RXD_ERR_CRC:
+ netdev_err(port->dev,
+ "bad rx status %08x (crc error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVPP2_RXD_ERR_OVERRUN:
+ netdev_err(port->dev,
+ "bad rx status %08x (overrun error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ case MVPP2_RXD_ERR_RESOURCE:
+ netdev_err(port->dev,
+ "bad rx status %08x (resource error), size=%d\n",
+ status, rx_desc->data_size);
+ break;
+ }
+}
+
+/* Handle RX checksum offload */
+void mv_pp2x_rx_csum(struct mv_pp2x_port *port, u32 status,
+ struct sk_buff *skb)
+{
+ if (((status & MVPP2_RXD_L3_IP4) &&
+ !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+ (status & MVPP2_RXD_L3_IP6))
+ if (((status & MVPP2_RXD_L4_UDP) ||
+ (status & MVPP2_RXD_L4_TCP)) &&
+ (status & MVPP2_RXD_L4_CSUM_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = 1;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+void mv_pp2x_rx_pkts_coal_set(struct mv_pp2x_port *port,
+ struct mv_pp2x_rx_queue *rxq)
+{
+ if (rxq->pkts_coal > MVPP2_MAX_OCCUPIED_THRESH)
+ rxq->pkts_coal = MVPP2_MAX_OCCUPIED_THRESH;
+
+ mv_pp2x_write(&port->priv->hw, MVPP2_RXQ_NUM_REG, rxq->id);
+ mv_pp2x_write(&port->priv->hw, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
+}
+
+/* Set the time delay in usec before Rx interrupt */
+void mv_pp2x_rx_time_coal_set(struct mv_pp2x_port *port,
+ struct mv_pp2x_rx_queue *rxq)
+{
+ u32 val = usec_to_cycles(rxq->time_coal, port->priv->hw.tclk);
+
+ if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
+ rxq->time_coal = cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD,
+ port->priv->hw.tclk);
+ val = usec_to_cycles(rxq->time_coal, port->priv->hw.tclk);
+ }
+ mv_pp2x_write(&port->priv->hw,
+ MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+}
+
+/* Set threshold for TX_DONE pkts coalescing */
+void mv_pp2x_tx_done_pkts_coal_set(void *arg)
+{
+ struct mv_pp2x_port *port = arg;
+ int queue;
+ u32 val;
+
+ for (queue = 0; queue < port->num_tx_queues; queue++) {
+ struct mv_pp2x_tx_queue *txq = port->txqs[queue];
+
+ if (txq->pkts_coal > MVPP2_MAX_TRANSMITTED_THRESH)
+ txq->pkts_coal = MVPP2_MAX_TRANSMITTED_THRESH;
+ val = (txq->pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
+ MVPP2_TRANSMITTED_THRESH_MASK;
+ mv_pp2x_write(&port->priv->hw, MVPP2_TXQ_NUM_REG, txq->id);
+ mv_pp2x_write(&port->priv->hw, MVPP2_TXQ_THRESH_REG, val);
+ }
+}
+
/* Set the time delay in usec before Tx-done interrupt
 * (the previous comment said "Rx" -- copy-paste from the Rx variant;
 * this programs the PP22 TX ISR threshold register).
 */
void mv_pp2x_tx_done_time_coal_set(struct mv_pp2x_port *port, u32 usec)
{
	/* Convert usec to tclk cycles and clamp to the HW maximum */
	u32 val = usec_to_cycles(usec, port->priv->hw.tclk);

	if (val > MVPP22_MAX_ISR_TX_THRESHOLD)
		val = MVPP22_MAX_ISR_TX_THRESHOLD;
	mv_pp2x_write(&port->priv->hw,
		      MVPP22_ISR_TX_THRESHOLD_REG(port->id), val);
}
+
+/* Change maximum receive size of the port */
+void mv_pp21_gmac_max_rx_size_set(struct mv_pp2x_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
+ val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+ MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
/* Set max sizes for Tx queues.
 * Programs the egress scheduler MTU and grows the TXP/TXQ token sizes so
 * they are never smaller than the MTU.
 */
void mv_pp2x_txp_max_tx_size_set(struct mv_pp2x_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;
	struct mv_pp2x_hw *hw = &port->priv->hw;

	/* Packet size scaled by 8, clamped to the scheduler's MTU field
	 * (presumably the token counters are bit-based -- TODO confirm).
	 */
	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mv_pp2x_egress_port(port);
	mv_pp2x_write(hw, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mv_pp2x_read(hw, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mv_pp2x_write(hw, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mv_pp2x_read(hw, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mv_pp2x_write(hw, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow each TXQ's token size up to the (tripled) MTU if needed */
	for (txq = 0; txq < port->num_tx_queues; txq++) {
		val = mv_pp2x_read(hw,
				   MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mv_pp2x_write(hw,
				      MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				      val);
		}
	}
}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+u32 mv_pp2x_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto)
+{
+ u32 command;
+
+ /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+ * G_L4_chk, L4_type required only for checksum calculation
+ */
+ command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+ command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+ command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
+ if (l3_proto == ETH_P_IP) {
+ command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
+ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
+ } else {
+ command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
+ }
+
+ if (l4_proto == IPPROTO_TCP) {
+ command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else if (l4_proto == IPPROTO_UDP) {
+ command |= MVPP2_TXD_L4_UDP; /* enable UDP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else {
+ command |= MVPP2_TXD_L4_CSUM_NOT;
+ }
+
+ return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+
+ /* Tx descriptors helper methods */
+
+/* Get number of Tx descriptors waiting to be transmitted by HW */
+int mv_pp2x_txq_pend_desc_num_get(struct mv_pp2x_port *port,
+ struct mv_pp2x_tx_queue *txq)
+{
+ u32 val;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ mv_pp2x_write(hw, MVPP2_TXQ_NUM_REG, txq->id);
+ val = mv_pp2x_read(hw, MVPP2_TXQ_PENDING_REG);
+
+ return val & MVPP2_TXQ_PENDING_MASK;
+}
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+struct mv_pp2x_tx_desc *mv_pp2x_txq_next_desc_get(
+ struct mv_pp2x_aggr_tx_queue *aggr_txq)
+{
+ int tx_desc = aggr_txq->next_desc_to_proc;
+
+ aggr_txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(aggr_txq, tx_desc);
+ return aggr_txq->first_desc + tx_desc;
+}
+
+/* Get pointer to previous aggregated TX descriptor for rollback when needed */
+struct mv_pp2x_tx_desc *mv_pp2x_txq_prev_desc_get(
+ struct mv_pp2x_aggr_tx_queue *aggr_txq)
+{
+ int tx_desc = aggr_txq->next_desc_to_proc;
+
+ if (tx_desc > 0)
+ aggr_txq->next_desc_to_proc = tx_desc - 1;
+ else
+ aggr_txq->next_desc_to_proc = aggr_txq->last_desc;
+
+ return (aggr_txq->first_desc + tx_desc);
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent */
+void mv_pp2x_aggr_txq_pend_desc_add(struct mv_pp2x_port *port, int pending)
+{
+ /* aggregated access - relevant TXQ number is written in TX desc */
+ mv_pp2x_write(&port->priv->hw, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+int mv_pp2x_aggr_desc_num_read(struct mv_pp2x *priv, int cpu)
+{
+ u32 val = mv_pp2x_read(&priv->hw, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+
+ return(val & MVPP2_AGGR_TXQ_PENDING_MASK);
+}
+EXPORT_SYMBOL(mv_pp2x_aggr_desc_num_read);
+
+/* Check if there are enough free descriptors in aggregated txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ */
+int mv_pp2x_aggr_desc_num_check(struct mv_pp2x *priv,
+ struct mv_pp2x_aggr_tx_queue *aggr_txq,
+ int num, int cpu)
+{
+ if ((aggr_txq->count + num) > aggr_txq->size) {
+ /* Update number of occupied aggregated Tx descriptors */
+ u32 val = mv_pp2x_relaxed_read(&priv->hw,
+ MVPP2_AGGR_TXQ_STATUS_REG(cpu), cpu);
+
+ aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
+
+ if ((aggr_txq->count + num) > aggr_txq->size)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Reserved Tx descriptors allocation request */
+int mv_pp2x_txq_alloc_reserved_desc(struct mv_pp2x *priv,
+ struct mv_pp2x_tx_queue *txq, int num, int cpu)
+{
+ u32 val;
+
+ val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+ mv_pp2x_relaxed_write(&priv->hw, MVPP2_TXQ_RSVD_REQ_REG, val, cpu);
+
+ val = mv_pp2x_relaxed_read(&priv->hw, MVPP2_TXQ_RSVD_RSLT_REG, cpu);
+
+ return val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
/* Set rx queue offset */
void mv_pp2x_rxq_offset_set(struct mv_pp2x_port *port,
			    int prxq, int offset)
{
	u32 val;
	struct mv_pp2x_hw *hw = &port->priv->hw;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
+
/* Port configuration routines */

/* Program the GMAC CTRL_2 register according to the PHY interface mode */
void mv_pp21_port_mii_set(struct mv_pp2x_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->mac_data.phy_mode) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fallthrough -- NOTE(review): falling into default (PCS
		 * disable) looks intentional for RGMII; confirm.
		 */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
+
+void mv_pp21_port_fc_adv_enable(struct mv_pp2x_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val |= MVPP2_GMAC_FC_ADV_EN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+void mv_pp21_port_enable(struct mv_pp2x_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val |= MVPP2_GMAC_PORT_EN_MASK;
+ val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+void mv_pp21_port_disable(struct mv_pp2x_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+ val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+void mv_pp21_port_periodic_xon_disable(struct mv_pp2x_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
+ ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+ writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback port */
+void mv_pp21_port_loopback_set(struct mv_pp2x_port *port)
+{
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+
+ if (port->mac_data.speed == 1000)
+ val |= MVPP2_GMAC_GMII_LB_EN_MASK;
+ else
+ val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+ if (port->mac_data.phy_mode == PHY_INTERFACE_MODE_SGMII)
+ val |= MVPP2_GMAC_PCS_LB_EN_MASK;
+ else
+ val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+ writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
/* De-assert the GMAC port reset and wait for HW to acknowledge it */
void mv_pp21_port_reset(struct mv_pp2x_port *port)
{
	u32 val;

	/* Clear the port reset bit */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Busy-wait until HW reports reset de-asserted.
	 * NOTE(review): unbounded polling loop -- hangs if the bit never
	 * clears; confirm a timeout is not required here.
	 */
	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
+
+/* Refill BM pool */
+void mv_pp2x_pool_refill(struct mv_pp2x *priv, u32 pool,
+ dma_addr_t phys_addr, int cpu)
+{
+ mv_pp2x_bm_pool_put(&priv->hw, pool, phys_addr, cpu);
+}
+
+void mv_pp2x_pool_refill_virtual(struct mv_pp2x *priv, u32 pool,
+ dma_addr_t phys_addr, u8 *cookie)
+{
+ int cpu = smp_processor_id();
+
+ mv_pp2x_bm_pool_put_virtual(&priv->hw, pool, phys_addr, cookie, cpu);
+}
+
+/* Set pool buffer size */
+void mv_pp2x_bm_pool_bufsize_set(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_bm_pool *bm_pool, int buf_size)
+{
+ u32 val;
+
+ bm_pool->buf_size = buf_size;
+
+ val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+ mv_pp2x_write(hw, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
+}
+
+/* Attach long pool to rxq */
+void mv_pp21_rxq_long_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int long_pool)
+{
+ u32 val;
+
+ val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP21_RXQ_POOL_LONG_MASK;
+ val |= ((long_pool << MVPP21_RXQ_POOL_LONG_OFFS) &
+ MVPP21_RXQ_POOL_LONG_MASK);
+
+ mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq */
+void mv_pp21_rxq_short_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int short_pool)
+{
+ u32 val;
+
+ val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP21_RXQ_POOL_SHORT_MASK;
+ val |= ((short_pool << MVPP21_RXQ_POOL_SHORT_OFFS) &
+ MVPP21_RXQ_POOL_SHORT_MASK);
+
+ mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach long pool to rxq */
+void mv_pp22_rxq_long_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int long_pool)
+{
+ u32 val;
+
+ val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP22_RXQ_POOL_LONG_MASK;
+ val |= ((long_pool << MVPP22_RXQ_POOL_LONG_OFFS) &
+ MVPP22_RXQ_POOL_LONG_MASK);
+
+ mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq */
+void mv_pp22_rxq_short_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int short_pool)
+{
+ u32 val;
+
+ val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(prxq));
+ val &= ~MVPP22_RXQ_POOL_SHORT_MASK;
+ val |= ((short_pool << MVPP22_RXQ_POOL_SHORT_OFFS) &
+ MVPP22_RXQ_POOL_SHORT_MASK);
+
+ mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Enable/disable receiving packets */
+void mv_pp2x_ingress_enable(struct mv_pp2x_port *port)
+{
+ u32 val;
+ int lrxq, queue;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ for (lrxq = 0; lrxq < port->num_rx_queues; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(queue));
+ val &= ~MVPP2_RXQ_DISABLE_MASK;
+ mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+void mv_pp2x_ingress_disable(struct mv_pp2x_port *port)
+{
+ u32 val;
+ int lrxq, queue;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ for (lrxq = 0; lrxq < port->num_rx_queues; lrxq++) {
+ queue = port->rxqs[lrxq]->id;
+ val = mv_pp2x_read(hw, MVPP2_RXQ_CONFIG_REG(queue));
+ val |= MVPP2_RXQ_DISABLE_MASK;
+ mv_pp2x_write(hw, MVPP2_RXQ_CONFIG_REG(queue), val);
+ }
+}
+
+void mv_pp2x_egress_enable(struct mv_pp2x_port *port)
+{
+ u32 qmap;
+ int queue;
+ int tx_port_num = mv_pp2x_egress_port(port);
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ /* Enable all initialized TXs. */
+ qmap = 0;
+ for (queue = 0; queue < port->num_tx_queues; queue++) {
+ struct mv_pp2x_tx_queue *txq = port->txqs[queue];
+
+ if (txq->first_desc)
+ qmap |= (1 << queue);
+ }
+
+ mv_pp2x_write(hw, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+ mv_pp2x_write(hw, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+
+ pr_debug("tx_port_num=%d qmap=0x%x\n", tx_port_num, qmap);
+}
+
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
void mv_pp2x_egress_disable(struct mv_pp2x_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mv_pp2x_egress_port(port);
	struct mv_pp2x_hw *hw = &port->priv->hw;

	/* Issue stop command for active channels only */
	mv_pp2x_write(hw, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mv_pp2x_read(hw, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mv_pp2x_write(hw, MVPP2_TXP_SCHED_Q_CMD_REG,
			      (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate, polling once per ms up
	 * to MVPP2_TX_DISABLE_TIMEOUT_MSEC, then warn and give up.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mv_pp2x_read(hw, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
+
/* Parser default initialization.
 * Enables the TCAM, wipes and invalidates all TCAM/SRAM entries,
 * allocates the device-managed shadow, then programs the default
 * per-protocol parser sections. Returns 0, -ENOMEM, or the first
 * failing section's error code.
 */
int mv_pp2x_prs_default_init(struct platform_device *pdev,
			     struct mv_pp2x_hw *hw)
{
	int err, index, i;

	/* Enable tcam table */
	mv_pp2x_write(hw, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mv_pp2x_write(hw, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mv_pp2x_write(hw, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mv_pp2x_write(hw, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mv_pp2x_write(hw, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mv_pp2x_prs_hw_inv(hw, index);

	/* Device-managed shadow of the parser state */
	hw->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
				      sizeof(struct mv_pp2x_prs_shadow),
				      GFP_KERNEL);

	if (!hw->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mv_pp2x_prs_hw_port_init(hw, index, MVPP2_PRS_LU_MH,
					 MVPP2_PRS_PORT_LU_MAX, 0);

	/* Per-protocol parser sections -- presumably order-dependent as
	 * each stage chains to the next lookup; keep this sequence.
	 */
	mv_pp2x_prs_def_flow_init(hw);

	mv_pp2x_prs_mh_init(hw);

	mv_pp2x_prs_mac_init(hw);

	mv_pp2x_prs_dsa_init(hw);

	err = mv_pp2x_prs_etype_init(hw);
	if (err)
		return err;

	err = mv_pp2x_prs_vlan_init(pdev, hw);
	if (err)
		return err;
	err = mv_pp2x_prs_pppoe_init(hw);
	if (err)
		return err;

	err = mv_pp2x_prs_ip6_init(hw);
	if (err)
		return err;

	err = mv_pp2x_prs_ip4_init(hw);
	if (err)
		return err;
	return 0;
}
+
+/* shift to (current offset + shift) */
+int mv_pp2x_prs_sw_sram_shift_set(struct mv_pp2x_prs_entry *pe,
+ int shift, unsigned int op)
+{
+ if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(shift, 0 - MVPP2_PRS_SRAM_SHIFT_MASK,
+ MVPP2_PRS_SRAM_SHIFT_MASK) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(op, 0,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK) == MV_ERROR)
+ return MV_ERROR;
+
+ /* Set sign */
+ if (shift < 0) {
+ pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_SHIFT_SIGN_BIT)] |=
+ (1 << (MVPP2_PRS_SRAM_SHIFT_SIGN_BIT % 8));
+ shift = 0 - shift;
+ } else
+ pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_SHIFT_SIGN_BIT)] &=
+ (~(1 << (MVPP2_PRS_SRAM_SHIFT_SIGN_BIT % 8)));
+
+ /* Set offset */
+ pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_SHIFT_OFFS)] = (unsigned char)shift;
+
+ /* Reset and Set operation */
+ pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS)] &=
+ ~(MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK <<
+ (MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS % 8));
+
+ pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS)] |=
+ (op << (MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS % 8));
+
+ /* Set base offset as current */
+ pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS)] &=
+ (~(1 << (MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS % 8)));
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_shift_set);
+
/* Read back the signed shift programmed by mv_pp2x_prs_sw_sram_shift_set() */
int mv_pp2x_prs_sw_sram_shift_get(struct mv_pp2x_prs_entry *pe, int *shift)
{
	int sign;

	if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
		return MV_ERROR;
	if (mv_pp2x_ptr_validate(shift) == MV_ERROR)
		return MV_ERROR;

	/* Extract raw sign bit and magnitude from the SRAM bytes */
	sign = pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_SHIFT_SIGN_BIT)] &
		(1 << (MVPP2_PRS_SRAM_SHIFT_SIGN_BIT % 8));
	*shift = ((int)(pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_SHIFT_OFFS)])) &
		MVPP2_PRS_SRAM_SHIFT_MASK;

	/* NOTE(review): 'sign' holds the raw masked bit, so it equals 1
	 * only when MVPP2_PRS_SRAM_SHIFT_SIGN_BIT % 8 == 0; 'sign != 0'
	 * would be the robust test -- confirm the macro's value.
	 */
	if (sign == 1)
		*shift *= -1;
	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_shift_get);
+
/* Encode a signed user-defined-field offset, its type and its operation
 * select into the SW copy of the parser SRAM entry.
 * Returns MV_OK, or MV_ERROR on NULL @pe / out-of-range arguments.
 */
int mv_pp2x_prs_sw_sram_offset_set(struct mv_pp2x_prs_entry *pe,
				   unsigned int type, int offset,
				   unsigned int op)
{
	if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(offset, 0 - MVPP2_PRS_SRAM_UDF_MASK,
				   MVPP2_PRS_SRAM_UDF_MASK) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(type, 0,
				   MVPP2_PRS_SRAM_UDF_TYPE_MASK) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(op, 0,
				   MVPP2_PRS_SRAM_OP_SEL_UDF_MASK) == MV_ERROR)
		return MV_ERROR;

	/* Set offset sign.
	 * NOTE(review): the sign is stored in SHIFT_SIGN_BIT here, while
	 * mv_pp2x_prs_sw_sram_offset_get() reads UDF_SIGN_BIT -- confirm
	 * which bit the HW uses for the UDF offset sign.
	 */
	if (offset < 0) {
		offset = 0 - offset;
		/* set sram offset sign bit */
		pe->sram.byte[SRAM_BIT_TO_BYTE(
			MVPP2_PRS_SRAM_SHIFT_SIGN_BIT)] |=
			(1 << (MVPP2_PRS_SRAM_SHIFT_SIGN_BIT % 8));
	} else
		pe->sram.byte[SRAM_BIT_TO_BYTE(
			MVPP2_PRS_SRAM_SHIFT_SIGN_BIT)] &=
			(~(1 << (MVPP2_PRS_SRAM_SHIFT_SIGN_BIT % 8)));

	/* set offset value: the field straddles a byte boundary, so the
	 * low bits go into one SRAM byte and the high bits into the next
	 */
	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_OFFS)] &=
		(~(MVPP2_PRS_SRAM_UDF_MASK <<
		(MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_OFFS)] |=
		(offset << (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_OFFS + MVPP2_PRS_SRAM_UDF_BITS)] &=
		~(MVPP2_PRS_SRAM_UDF_MASK >>
		(8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_OFFS + MVPP2_PRS_SRAM_UDF_BITS)] |=
		(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* set offset type */
	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_TYPE_OFFS)] &=
		~(MVPP2_PRS_SRAM_UDF_TYPE_MASK <<
		(MVPP2_PRS_SRAM_UDF_TYPE_OFFS % 8));

	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_TYPE_OFFS)] |=
		(type << (MVPP2_PRS_SRAM_UDF_TYPE_OFFS % 8));

	/* Set offset operation (also split across two SRAM bytes) */
	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS)] &=
		~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK <<
		(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS)] |=
		(op << (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
		MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
		~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
		(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
		MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
		(op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS)] &=
		(~(1 << (MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS % 8)));

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_offset_set);
+
/* Decode the UDF offset value, offset type and offset-select operation
 * from the SRAM part of a parser entry (inverse of
 * mv_pp2x_prs_sw_sram_offset_set).
 * Returns MV_ERROR if @pe, @offset or @type is NULL, MV_OK otherwise.
 * NOTE(review): @op is dereferenced below without a NULL check, unlike
 * the other out-pointers - confirm callers always pass it.
 */
int mv_pp2x_prs_sw_sram_offset_get(struct mv_pp2x_prs_entry *pe,
				   unsigned int *type, int *offset,
				   unsigned int *op)
{
	int sign;

	if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_ptr_validate(offset) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_ptr_validate(type) == MV_ERROR)
		return MV_ERROR;

	/* offset type field */
	*type = pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_TYPE_OFFS)] >>
		(MVPP2_PRS_SRAM_UDF_TYPE_OFFS % 8);
	*type &= MVPP2_PRS_SRAM_UDF_TYPE_MASK;

	/* low bits of the offset value */
	*offset = (pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_OFFS)] >>
		(MVPP2_PRS_SRAM_UDF_OFFS % 8)) & 0x7f;
	/* NOTE(review): the index below adds SRAM_BIT_TO_BYTE(UDF_OFFS) to
	 * itself; the setter addresses the spill-over byte as
	 * SRAM_BIT_TO_BYTE(UDF_OFFS + UDF_BITS) - looks wrong, confirm.
	 */
	*offset |= (pe->sram.byte[
		SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS) +
		SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS)] <<
		(8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))) & 0x80;

	/* offset-select operation, also spliced from two bytes.
	 * NOTE(review): this reads OP_SEL_SHIFT_OFFS while the setter writes
	 * OP_SEL_UDF_OFFS, and the second index is again doubled - confirm.
	 */
	*op = (pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS)] >>
		(MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS % 8)) & 0x7;
	*op |= (pe->sram.byte[HW_BYTE_OFFS(
		SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS) +
		SRAM_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS))] <<
		(8 - (MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS % 8))) & 0x18;

	/* if the sign bit is set, the offset is negative.
	 * NOTE(review): the setter negates with "0 - offset" and uses
	 * SHIFT_SIGN_BIT; "1 - (*offset)" / UDF_SIGN_BIT here do not match
	 * - looks like a bug, confirm against the HW spec.
	 */
	sign = pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_UDF_SIGN_BIT)] &
		(1 << (MVPP2_PRS_SRAM_UDF_SIGN_BIT % 8));
	if (sign != 0)
		*offset = 1 - (*offset);

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_offset_get);
+
+int mv_pp2x_prs_sw_sram_next_lu_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *lu)
+{
+ if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(lu) == MV_ERROR)
+ return MV_ERROR;
+
+ *lu = pe->sram.byte[SRAM_BIT_TO_BYTE(
+ MVPP2_PRS_SRAM_NEXT_LU_OFFS)];
+ *lu = ((*lu) >> MVPP2_PRS_SRAM_NEXT_LU_OFFS % 8);
+ *lu &= MVPP2_PRS_SRAM_NEXT_LU_MASK;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_next_lu_get);
+
+int mv_pp2x_prs_sram_bit_get(struct mv_pp2x_prs_entry *pe, int bit_num,
+ unsigned int *bit)
+{
+ if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
+ return MV_ERROR;
+
+ *bit = pe->sram.byte[SRAM_BIT_TO_BYTE(bit_num)] &
+ (1 << (bit_num % 8));
+ *bit = (*bit) >> (bit_num % 8);
+ return MV_OK;
+}
+
/* Set the LU_DONE bit in @pe's SRAM. */
void mv_pp2x_prs_sw_sram_lu_done_set(struct mv_pp2x_prs_entry *pe)
{
	mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_lu_done_set);

/* Clear the LU_DONE bit in @pe's SRAM. */
void mv_pp2x_prs_sw_sram_lu_done_clear(struct mv_pp2x_prs_entry *pe)
{
	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_lu_done_clear);

/* Read the LU_DONE bit of @pe into @bit (0 or 1). */
int mv_pp2x_prs_sw_sram_lu_done_get(struct mv_pp2x_prs_entry *pe,
				    unsigned int *bit)
{
	return mv_pp2x_prs_sram_bit_get(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, bit);
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_lu_done_get);

/* Set the LU_GEN bit in @pe's SRAM. */
void mv_pp2x_prs_sw_sram_flowid_set(struct mv_pp2x_prs_entry *pe)
{
	mv_pp2x_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_flowid_set);

/* Clear the LU_GEN bit in @pe's SRAM. */
void mv_pp2x_prs_sw_sram_flowid_clear(struct mv_pp2x_prs_entry *pe)
{
	mv_pp2x_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_flowid_clear);

/* Read the LU_GEN bit of @pe into @bit (0 or 1). */
int mv_pp2x_prs_sw_sram_flowid_gen_get(struct mv_pp2x_prs_entry *pe,
				       unsigned int *bit)
{
	return mv_pp2x_prs_sram_bit_get(pe, MVPP2_PRS_SRAM_LU_GEN_BIT, bit);
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_flowid_gen_get);
+
+/* return RI and RI_UPDATE */
+int mv_pp2x_prs_sw_sram_ri_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *bits, unsigned int *enable)
+{
+ if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(bits) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(enable) == MV_ERROR)
+ return MV_ERROR;
+
+ *bits = pe->sram.word[MVPP2_PRS_SRAM_RI_OFFS / 32];
+ *enable = pe->sram.word[MVPP2_PRS_SRAM_RI_CTRL_OFFS / 32];
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_ri_get);
+
/* Read the AI bits and their control bits from the SRAM part of a parser
 * entry. Each field may straddle a byte boundary, so two adjacent SRAM
 * bytes are spliced together before masking with MVPP2_PRS_SRAM_AI_MASK.
 * Returns MV_ERROR on a NULL argument, MV_OK otherwise.
 */
int mv_pp2x_prs_sw_sram_ai_get(struct mv_pp2x_prs_entry *pe,
			       unsigned int *bits, unsigned int *enable)
{
	if (mv_pp2x_ptr_validate(pe) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_ptr_validate(bits) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_ptr_validate(enable) == MV_ERROR)
		return MV_ERROR;

	/* AI value: low part from the first byte, high part from the next */
	*bits = (pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_AI_OFFS)] >> (MVPP2_PRS_SRAM_AI_OFFS % 8)) |
		(pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_AI_OFFS +
		MVPP2_PRS_SRAM_AI_CTRL_BITS)] <<
		(8 - (MVPP2_PRS_SRAM_AI_OFFS % 8)));

	/* AI control bits: same two-byte splice at the control offset */
	*enable = (pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_AI_CTRL_OFFS)] >>
		(MVPP2_PRS_SRAM_AI_CTRL_OFFS % 8)) |
		(pe->sram.byte[SRAM_BIT_TO_BYTE(
		MVPP2_PRS_SRAM_AI_CTRL_OFFS +
		MVPP2_PRS_SRAM_AI_CTRL_BITS)] <<
		(8 - (MVPP2_PRS_SRAM_AI_CTRL_OFFS % 8)));

	/* keep only the AI field width */
	*bits &= MVPP2_PRS_SRAM_AI_MASK;
	*enable &= MVPP2_PRS_SRAM_AI_MASK;

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_prs_sw_sram_ai_get);
+
+/*#include "mvPp2ClsHw.h" */
+
+/********************************************************************/
+/***************** Classifier Top Public lkpid table APIs ********************/
+/********************************************************************/
+
+/*------------------------------------------------------------------*/
+
+int mv_pp2x_cls_hw_lkp_read(struct mv_pp2x_hw *hw, int lkpid, int way,
+ struct mv_pp2x_cls_lookup_entry *fe)
+{
+ unsigned int reg_val = 0;
+
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(way, 0, WAY_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(lkpid, 0,
+ MVPP2_CLS_FLOWS_TBL_SIZE) == MV_ERROR)
+ return MV_ERROR;
+
+ /* write index reg */
+ reg_val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) |
+ (lkpid << MVPP2_CLS_LKP_INDEX_LKP_OFFS);
+ mv_pp2x_write(hw, MVPP2_CLS_LKP_INDEX_REG, reg_val);
+
+ fe->way = way;
+ fe->lkpid = lkpid;
+
+ fe->data = mv_pp2x_read(hw, MVPP2_CLS_LKP_TBL_REG);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_hw_lkp_read);
+
+int mv_pp2x_cls_hw_lkp_write(struct mv_pp2x_hw *hw, int lkpid,
+ int way, struct mv_pp2x_cls_lookup_entry *fe)
+{
+ unsigned int reg_val = 0;
+
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(way, 0, 1) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(lkpid, 0,
+ MVPP2_CLS_FLOWS_TBL_SIZE) == MV_ERROR)
+ return MV_ERROR;
+
+ /* write index reg */
+ reg_val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) |
+ (lkpid << MVPP2_CLS_LKP_INDEX_LKP_OFFS);
+ mv_pp2x_write(hw, MVPP2_CLS_LKP_INDEX_REG, reg_val);
+
+ /* write flowId reg */
+ mv_pp2x_write(hw, MVPP2_CLS_LKP_TBL_REG, fe->data);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_hw_lkp_write);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_lkp_rxq_get(struct mv_pp2x_cls_lookup_entry *lkp, int *rxq)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(rxq) == MV_ERROR)
+ return MV_ERROR;
+
+ *rxq = (lkp->data & MVPP2_FLOWID_RXQ_MASK) >> MVPP2_FLOWID_RXQ;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_rxq_get);
+
+int mv_pp2x_cls_sw_lkp_rxq_set(struct mv_pp2x_cls_lookup_entry *lkp, int rxq)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(rxq, 0,
+ (1 << MVPP2_FLOWID_RXQ_BITS) - 1) == MV_ERROR)
+ return MV_ERROR;
+
+ lkp->data &= ~MVPP2_FLOWID_RXQ_MASK;
+ lkp->data |= (rxq << MVPP2_FLOWID_RXQ);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_rxq_set);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_lkp_en_get(struct mv_pp2x_cls_lookup_entry *lkp, int *en)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(en) == MV_ERROR)
+ return MV_ERROR;
+
+ *en = (lkp->data & MVPP2_FLOWID_EN_MASK) >> MVPP2_FLOWID_EN;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_en_get);
+
+int mv_pp2x_cls_sw_lkp_en_set(struct mv_pp2x_cls_lookup_entry *lkp, int en)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(en, 0, 1) == MV_ERROR)
+ return MV_ERROR;
+
+ lkp->data &= ~MVPP2_FLOWID_EN_MASK;
+ lkp->data |= (en << MVPP2_FLOWID_EN);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_en_set);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_lkp_flow_get(struct mv_pp2x_cls_lookup_entry *lkp,
+ int *flow_idx)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(flow_idx) == MV_ERROR)
+ return MV_ERROR;
+
+ *flow_idx = (lkp->data & MVPP2_FLOWID_FLOW_MASK) >> MVPP2_FLOWID_FLOW;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_flow_get);
+
+int mv_pp2x_cls_sw_lkp_flow_set(struct mv_pp2x_cls_lookup_entry *lkp,
+ int flow_idx)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(flow_idx, 0,
+ MVPP2_CLS_FLOWS_TBL_SIZE) == MV_ERROR)
+ return MV_ERROR;
+
+ lkp->data &= ~MVPP2_FLOWID_FLOW_MASK;
+ lkp->data |= (flow_idx << MVPP2_FLOWID_FLOW);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_flow_set);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_lkp_mod_get(struct mv_pp2x_cls_lookup_entry *lkp,
+ int *mod_base)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(mod_base) == MV_ERROR)
+ return MV_ERROR;
+
+ *mod_base = (lkp->data & MVPP2_FLOWID_MODE_MASK) >> MVPP2_FLOWID_MODE;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_mod_get);
+
+int mv_pp2x_cls_sw_lkp_mod_set(struct mv_pp2x_cls_lookup_entry *lkp,
+ int mod_base)
+{
+ if (mv_pp2x_ptr_validate(lkp) == MV_ERROR)
+ return MV_ERROR;
+
+ /* TODO: what is the max value of mode base */
+ if (mv_pp2x_range_validate(mod_base, 0,
+ (1 << MVPP2_FLOWID_MODE_BITS) - 1) == MV_ERROR)
+ return MV_ERROR;
+
+ lkp->data &= ~MVPP2_FLOWID_MODE_MASK;
+ lkp->data |= (mod_base << MVPP2_FLOWID_MODE);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_lkp_mod_set);
+
+/*********************************************************************/
+/***************** Classifier Top Public flows table APIs ********************/
+/********************************************************************/
+
+int mv_pp2x_cls_hw_flow_read(struct mv_pp2x_hw *hw, int index,
+ struct mv_pp2x_cls_flow_entry *fe)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(index, 0,
+ MVPP2_CLS_FLOWS_TBL_SIZE) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->index = index;
+
+ /*write index*/
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_INDEX_REG, index);
+
+ fe->data[0] = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL0_REG);
+ fe->data[1] = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL1_REG);
+ fe->data[2] = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL2_REG);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_hw_flow_read);
+
+/*----------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.18*/
+
+/*----------------------------------------------------------------------*/
+int mv_pp2x_cls_sw_flow_hek_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *num_of_fields, int field_ids[])
+{
+ int index;
+
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(num_of_fields) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(field_ids) == MV_ERROR)
+ return MV_ERROR;
+
+ *num_of_fields = (fe->data[1] & MVPP2_FLOW_FIELDS_NUM_MASK) >>
+ MVPP2_FLOW_FIELDS_NUM;
+
+ for (index = 0; index < (*num_of_fields); index++)
+ field_ids[index] = ((fe->data[2] &
+ MVPP2_FLOW_FIELD_MASK(index)) >>
+ MVPP2_FLOW_FIELD_ID(index));
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_hek_get);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_flow_port_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *type, int *portid)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(type) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(portid) == MV_ERROR)
+ return MV_ERROR;
+
+ *type = (fe->data[0] & MVPP2_FLOW_PORT_TYPE_MASK) >>
+ MVPP2_FLOW_PORT_TYPE;
+ *portid = (fe->data[0] & MVPP2_FLOW_PORT_ID_MASK) >>
+ MVPP2_FLOW_PORT_ID;
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_port_get);
+
+int mv_pp2x_cls_sw_flow_port_set(struct mv_pp2x_cls_flow_entry *fe,
+ int type, int portid)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(type, 0,
+ ((1 << MVPP2_FLOW_PORT_TYPE_BITS) - 1)) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(portid, 0,
+ ((1 << MVPP2_FLOW_PORT_ID_BITS) - 1)) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->data[0] &= ~MVPP2_FLOW_PORT_ID_MASK;
+ fe->data[0] &= ~MVPP2_FLOW_PORT_TYPE_MASK;
+
+ fe->data[0] |= (portid << MVPP2_FLOW_PORT_ID);
+ fe->data[0] |= (type << MVPP2_FLOW_PORT_TYPE);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_port_set);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_flow_portid_select(struct mv_pp2x_cls_flow_entry *fe,
+ int from)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(from, 0, 1) == MV_ERROR)
+ return MV_ERROR;
+
+ if (from)
+ fe->data[0] |= MVPP2_FLOW_PORT_ID_SEL_MASK;
+ else
+ fe->data[0] &= ~MVPP2_FLOW_PORT_ID_SEL_MASK;
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_portid_select);
+
+int mv_pp2x_cls_sw_flow_pppoe_set(struct mv_pp2x_cls_flow_entry *fe, int mode)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(mode, 0, MVPP2_FLOW_PPPOE_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->data[0] &= ~MVPP2_FLOW_PPPOE_MASK;
+ fe->data[0] |= (mode << MVPP2_FLOW_PPPOE);
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_pppoe_set);
+
+int mv_pp2x_cls_sw_flow_vlan_set(struct mv_pp2x_cls_flow_entry *fe, int mode)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(mode, 0, MVPP2_FLOW_VLAN_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->data[0] &= ~MVPP2_FLOW_VLAN_MASK;
+ fe->data[0] |= (mode << MVPP2_FLOW_VLAN);
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_vlan_set);
+
+/*----------------------------------------------------------------------*/
+int mv_pp2x_cls_sw_flow_macme_set(struct mv_pp2x_cls_flow_entry *fe, int mode)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(mode, 0, MVPP2_FLOW_MACME_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->data[0] &= ~MVPP2_FLOW_MACME_MASK;
+ fe->data[0] |= (mode << MVPP2_FLOW_MACME);
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_macme_set);
+
+/*----------------------------------------------------------------------*/
+int mv_pp2x_cls_sw_flow_udf7_set(struct mv_pp2x_cls_flow_entry *fe, int mode)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(mode, 0, MVPP2_FLOW_UDF7_MAX) == MV_ERROR)
+ return MV_ERROR;
+
+ fe->data[0] &= ~MVPP2_FLOW_UDF7_MASK;
+ fe->data[0] |= (mode << MVPP2_FLOW_UDF7);
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_udf7_set);
+
/* Set the sequence-control field in word 1 of a flow entry.
 * NOTE(review): @mode is bounded by MVPP2_FLOW_ENGINE_MAX even though it
 * is written into the SEQ_CTRL field - confirm the intended upper limit.
 */
int mv_pp2x_cls_sw_flow_seq_ctrl_set(struct mv_pp2x_cls_flow_entry *fe,
				     int mode)
{
	if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(mode, 0, MVPP2_FLOW_ENGINE_MAX) == MV_ERROR)
		return MV_ERROR;

	fe->data[1] &= ~MVPP2_FLOW_SEQ_CTRL_MASK;
	fe->data[1] |= (mode << MVPP2_FLOW_SEQ_CTRL);

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_seq_ctrl_set);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_flow_engine_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *engine, int *is_last)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(engine) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(is_last) == MV_ERROR)
+ return MV_ERROR;
+
+ *engine = (fe->data[0] & MVPP2_FLOW_ENGINE_MASK) >> MVPP2_FLOW_ENGINE;
+ *is_last = fe->data[0] & MVPP2_FLOW_LAST_MASK;
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_engine_get);
+
+/*----------------------------------------------------------------------*/
/* Set the engine number and the "last" flag in word 0 of a flow entry.
 * NOTE(review): @is_last is range-checked but @engine is not, although
 * the other setters in this file validate every field - confirm whether
 * a range check on @engine was intended.
 */
int mv_pp2x_cls_sw_flow_engine_set(struct mv_pp2x_cls_flow_entry *fe,
				   int engine, int is_last)
{
	if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(is_last, 0, 1) == MV_ERROR)
		return MV_ERROR;

	fe->data[0] &= ~MVPP2_FLOW_LAST_MASK;
	fe->data[0] &= ~MVPP2_FLOW_ENGINE_MASK;

	fe->data[0] |= is_last;
	fe->data[0] |= (engine << MVPP2_FLOW_ENGINE);

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_engine_set);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_sw_flow_extra_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *type, int *prio)
+{
+ if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(type) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(prio) == MV_ERROR)
+ return MV_ERROR;
+
+ *type = (fe->data[1] & MVPP2_FLOW_LKP_TYPE_MASK) >>
+ MVPP2_FLOW_LKP_TYPE;
+ *prio = (fe->data[1] & MVPP2_FLOW_FIELD_PRIO_MASK) >>
+ MVPP2_FLOW_FIELD_PRIO;
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_extra_get);
+
/* Set the lookup-type and field-priority fields in word 1 of a flow
 * entry.
 * NOTE(review): @type is bounded by MVPP2_FLOW_PORT_ID_MAX although it
 * is written into the LKP_TYPE field, and @prio by the FIELD_ID bit
 * width although it goes into FIELD_PRIO - confirm both limits.
 */
int mv_pp2x_cls_sw_flow_extra_set(struct mv_pp2x_cls_flow_entry *fe,
				  int type, int prio)
{
	if (mv_pp2x_ptr_validate(fe) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(type, 0,
				   MVPP2_FLOW_PORT_ID_MAX) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(prio, 0,
				   ((1 << MVPP2_FLOW_FIELD_ID_BITS) - 1)) == MV_ERROR)
		return MV_ERROR;

	fe->data[1] &= ~MVPP2_FLOW_LKP_TYPE_MASK;
	fe->data[1] |= (type << MVPP2_FLOW_LKP_TYPE);

	fe->data[1] &= ~MVPP2_FLOW_FIELD_PRIO_MASK;
	fe->data[1] |= (prio << MVPP2_FLOW_FIELD_PRIO);

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_cls_sw_flow_extra_set);
+
+/*----------------------------------------------------------------------*/
+/* Classifier Top Public length change table APIs */
+/*----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------*/
+int mv_pp2x_cls_hw_flow_hit_get(struct mv_pp2x_hw *hw,
+ int index, unsigned int *cnt)
+{
+ if (mv_pp2x_range_validate(index, 0,
+ MVPP2_CLS_FLOWS_TBL_SIZE) == MV_ERROR)
+ return MV_ERROR;
+
+ /*set index */
+ mv_pp2x_write(hw, MVPP2_CNT_IDX_REG, MVPP2_CNT_IDX_FLOW(index));
+
+ if (cnt)
+ *cnt = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL_HIT_REG);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_hw_flow_hit_get);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_hw_lkp_hit_get(struct mv_pp2x_hw *hw, int lkpid, int way,
+ unsigned int *cnt)
+{
+ if (mv_pp2x_range_validate(way, 0, 1) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_range_validate(lkpid, 0,
+ MVPP2_CLS_LKP_TBL_SIZE) == MV_ERROR)
+ return MV_ERROR;
+
+ /*set index */
+ mv_pp2x_write(hw, MVPP2_CNT_IDX_REG, MVPP2_CNT_IDX_LKP(lkpid, way));
+
+ if (cnt)
+ *cnt = mv_pp2x_read(hw, MVPP2_CLS_LKP_TBL_HIT_REG);
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_hw_lkp_hit_get);
+
+/*----------------------------------------------------------------------*/
+/* Classifier C2 engine QoS table Public APIs */
+/*----------------------------------------------------------------------*/
+
/* Read one C2 QoS table entry from HW into @qos.
 * @tbl_sel selects the table type (1 = DSCP, 0 = PRI); the valid
 * @tbl_id / @tbl_line ranges differ per table type.
 * Returns MV_ERROR on a NULL @qos or out-of-range argument.
 */
int mv_pp2x_cls_c2_qos_hw_read(struct mv_pp2x_hw *hw, int tbl_id,
			       int tbl_sel, int tbl_line,
			       struct mv_pp2x_cls_c2_qos_entry *qos)
{
	unsigned int reg_val = 0;

	if (mv_pp2x_ptr_validate(qos) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(tbl_sel, 0, 1) == MV_ERROR) /* one bit */
		return MV_ERROR;

	if (tbl_sel == 1) {
		/* DSCP table */
		/* TODO define 8=DSCP_TBL_NUM 64=DSCP_TBL_LINES */
		if (mv_pp2x_range_validate(tbl_id, 0,
					   MVPP2_QOS_TBL_NUM_DSCP) == MV_ERROR)
			return MV_ERROR;
		if (mv_pp2x_range_validate(tbl_line, 0,
					   MVPP2_QOS_TBL_LINE_NUM_DSCP) == MV_ERROR)
			return MV_ERROR;
	} else {
		/* PRI table */
		/* TODO define 64=PRI_TBL_NUM 8=PRI_TBL_LINES */
		if (mv_pp2x_range_validate(tbl_id, 0,
					   MVPP2_QOS_TBL_NUM_PRI) == MV_ERROR)
			return MV_ERROR;
		if (mv_pp2x_range_validate(tbl_line, 0,
					   MVPP2_QOS_TBL_LINE_NUM_PRI) == MV_ERROR)
			return MV_ERROR;
	}

	qos->tbl_id = tbl_id;
	qos->tbl_sel = tbl_sel;
	qos->tbl_line = tbl_line;

	/* select the entry: write the index register */
	reg_val |= (tbl_line << MVPP2_CLS2_DSCP_PRI_INDEX_LINE_OFF);
	reg_val |= (tbl_sel << MVPP2_CLS2_DSCP_PRI_INDEX_SEL_OFF);
	reg_val |= (tbl_id << MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF);

	mv_pp2x_write(hw, MVPP2_CLS2_DSCP_PRI_INDEX_REG, reg_val);

	/* read the entry's data word */
	qos->data = mv_pp2x_read(hw, MVPP2_CLS2_QOS_TBL_REG);

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_cls_c2_qos_hw_read);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2_cls_c2_qos_prio_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *prio)
+{
+ if (mv_pp2x_ptr_validate(qos) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(prio) == MV_ERROR)
+ return MV_ERROR;
+
+ *prio = (qos->data & MVPP2_CLS2_QOS_TBL_PRI_MASK) >>
+ MVPP2_CLS2_QOS_TBL_PRI_OFF;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2_cls_c2_qos_prio_get);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2_cls_c2_qos_dscp_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *dscp)
+{
+ if (mv_pp2x_ptr_validate(qos) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(dscp) == MV_ERROR)
+ return MV_ERROR;
+
+ *dscp = (qos->data & MVPP2_CLS2_QOS_TBL_DSCP_MASK) >>
+ MVPP2_CLS2_QOS_TBL_DSCP_OFF;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2_cls_c2_qos_dscp_get);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2_cls_c2_qos_color_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *color)
+{
+ if (mv_pp2x_ptr_validate(qos) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(color) == MV_ERROR)
+ return MV_ERROR;
+
+ *color = (qos->data & MVPP2_CLS2_QOS_TBL_COLOR_MASK) >>
+ MVPP2_CLS2_QOS_TBL_COLOR_OFF;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2_cls_c2_qos_color_get);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2_cls_c2_qos_gpid_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *gpid)
+{
+ if (mv_pp2x_ptr_validate(qos) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(gpid) == MV_ERROR)
+ return MV_ERROR;
+
+ *gpid = (qos->data & MVPP2_CLS2_QOS_TBL_GEMPORT_MASK) >>
+ MVPP2_CLS2_QOS_TBL_GEMPORT_OFF;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2_cls_c2_qos_gpid_get);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2_cls_c2_qos_queue_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *queue)
+{
+ if (mv_pp2x_ptr_validate(qos) == MV_ERROR)
+ return MV_ERROR;
+
+ if (mv_pp2x_ptr_validate(queue) == MV_ERROR)
+ return MV_ERROR;
+
+ *queue = (qos->data & MVPP2_CLS2_QOS_TBL_QUEUENUM_MASK) >>
+ MVPP2_CLS2_QOS_TBL_QUEUENUM_OFF;
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2_cls_c2_qos_queue_get);
+
+/*----------------------------------------------------------------------*/
+/* Classifier C2 engine TCAM table Public APIs */
+/*----------------------------------------------------------------------*/
+
/* note: no error is returned when the entry is invalid;
 * the caller should check c2->inv after this function returns
 */
/* Read C2 TCAM/SRAM entry @index from HW into @c2.
 * If the entry is flagged invalid, only c2->inv is filled in and MV_OK is
 * still returned - the caller must test c2->inv before using the data.
 */
int mv_pp2x_cls_c2_hw_read(struct mv_pp2x_hw *hw, int index,
			   struct mv_pp2x_cls_c2_entry *c2)
{
	unsigned int reg_val;
	int tcm_idx;

	if (mv_pp2x_ptr_validate(c2) == MV_ERROR)
		return MV_ERROR;

	c2->index = index;

	/* select the entry: write the index register */
	mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, index);

	/* read the inValid bit */
	reg_val = mv_pp2x_read(hw, MVPP2_CLS2_TCAM_INV_REG);
	c2->inv = (reg_val & MVPP2_CLS2_TCAM_INV_INVALID_MASK) >>
		MVPP2_CLS2_TCAM_INV_INVALID_OFF;

	/* invalid entry: nothing more to fetch */
	if (c2->inv)
		return MV_OK;

	/* fetch all TCAM data words */
	for (tcm_idx = 0; tcm_idx < MVPP2_CLS_C2_TCAM_WORDS; tcm_idx++)
		c2->tcam.words[tcm_idx] = mv_pp2x_read(hw,
			MVPP2_CLS2_TCAM_DATA_REG(tcm_idx));

	/* read action_tbl 0x1B30 */
	c2->sram.regs.action_tbl = mv_pp2x_read(hw, MVPP2_CLS2_ACT_DATA_REG);

	/* read actions 0x1B60 */
	c2->sram.regs.actions = mv_pp2x_read(hw, MVPP2_CLS2_ACT_REG);

	/* read qos_attr 0x1B64 */
	c2->sram.regs.qos_attr = mv_pp2x_read(hw, MVPP2_CLS2_ACT_QOS_ATTR_REG);

	/* read hwf_attr 0x1B68 */
	c2->sram.regs.hwf_attr = mv_pp2x_read(hw, MVPP2_CLS2_ACT_HWF_ATTR_REG);

	/* read rss_attr 0x1B6C (register is named DUP_ATTR) */
	c2->sram.regs.rss_attr = mv_pp2x_read(hw, MVPP2_CLS2_ACT_DUP_ATTR_REG);

	/* read seq_attr 0x1B70 */
	c2->sram.regs.seq_attr = mv_pp2x_read(hw, MVPP22_CLS2_ACT_SEQ_ATTR_REG);

	return MV_OK;
}
EXPORT_SYMBOL(mv_pp2x_cls_c2_hw_read);
+
+/*----------------------------------------------------------------------*/
+
/* Read one TCAM data byte and its enable (mask) byte from a C2 entry.
 * @offs: byte offset inside the TCAM data.
 * NOTE(review): the range check accepts 0..8 inclusive (9 positions);
 * confirm whether the upper bound should be exclusive.
 */
int mv_pp2_cls_c2_tcam_byte_get(struct mv_pp2x_cls_c2_entry *c2,
				unsigned int offs, unsigned char *byte,
				unsigned char *enable)
{
	if (mv_pp2x_ptr_validate(c2) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_ptr_validate(byte) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_ptr_validate(enable) == MV_ERROR)
		return MV_ERROR;

	if (mv_pp2x_range_validate(offs, 0, 8) == MV_ERROR)
		return MV_ERROR;

	*byte = c2->tcam.bytes[TCAM_DATA_BYTE(offs)];
	*enable = c2->tcam.bytes[TCAM_DATA_MASK(offs)];
	return MV_OK;
}
+
+/*----------------------------------------------------------------------*/
+/* return EQUALS if tcam_data[off]&tcam_mask[off] = byte */
+/*----------------------------------------------------------------------*/
+/* Classifier C2 engine Hit counters Public APIs */
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_c2_hit_cntr_is_busy(struct mv_pp2x_hw *hw)
+{
+ unsigned int reg_val;
+
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS2_HIT_CTR_REG);
+ reg_val &= MVPP2_CLS2_HIT_CTR_CLR_DONE_MASK;
+ reg_val >>= MVPP2_CLS2_HIT_CTR_CLR_DONE_OFF;
+
+ return (1 - (int)reg_val);
+}
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_c2_hit_cntr_clear_all(struct mv_pp2x_hw *hw)
+{
+ int iter = 0;
+
+ /* wrirte clear bit*/
+ mv_pp2x_write(hw, MVPP2_CLS2_HIT_CTR_CLR_REG,
+ (1 << MVPP2_CLS2_HIT_CTR_CLR_CLR_OFF));
+
+ while (mv_pp2x_cls_c2_hit_cntr_is_busy(hw))
+ if (iter++ >= RETRIES_EXCEEDED) {
+ pr_debug("%s:Error - retries exceeded.\n", __func__);
+ return MV_ERROR;
+ }
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_hit_cntr_clear_all);
+
+/*----------------------------------------------------------------------*/
+
+int mv_pp2x_cls_c2_hit_cntr_read(struct mv_pp2x_hw *hw, int index, u32 *cntr)
+{
+ unsigned int value = 0;
+
+ /* write index reg */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, index);
+
+ value = mv_pp2x_read(hw, MVPP2_CLS2_HIT_CTR_REG);
+
+ if (cntr)
+ *cntr = value;
+
+ return MV_OK;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_hit_cntr_read);
+
+void mv_pp2x_cls_flow_port_add(struct mv_pp2x_hw *hw, int index, int port_id)
+{
+ u32 data;
+
+ /* Write flow index */
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_INDEX_REG, index);
+ /* Read first data with port info */
+ data = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL0_REG);
+ /* Add the port */
+ data |= ((1 << port_id) << MVPP2_FLOW_PORT_ID);
+ /* Update the register */
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_TBL0_REG, data);
+}
+
+void mv_pp2x_cls_flow_port_del(struct mv_pp2x_hw *hw, int index, int port_id)
+{
+ u32 data;
+
+ /* Write flow index */
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_INDEX_REG, index);
+ /* Read first data with port info */
+ data = mv_pp2x_read(hw, MVPP2_CLS_FLOW_TBL0_REG);
+ /* Delete the port */
+ data &= ~(((1 << port_id) << MVPP2_FLOW_PORT_ID));
+ /* Update the register */
+ mv_pp2x_write(hw, MVPP2_CLS_FLOW_TBL0_REG, data);
+}
+
/* The function prepares a temporary copy of the flow-table entries that
 * belong to @lkpid, so the original entries can be modified while the
 * copies stay active. Copies are written consecutively starting at the
 * shadow's flow_free_start; @temp_flow_idx receives the index of the
 * first copy.
 * NOTE(review): hw->cls_shadow->flow_free_start itself is not advanced
 * here - presumably intentional since the copies are temporary; confirm.
 */
void mv_pp2x_cls_flow_tbl_temp_copy(struct mv_pp2x_hw *hw, int lkpid,
				    int *temp_flow_idx)
{
	struct mv_pp2x_cls_flow_entry fe;
	int index = lkpid - MVPP2_PRS_FL_START;
	int flow_start = hw->cls_shadow->flow_free_start;
	struct mv_pp2x_cls_flow_info *flow_info;

	flow_info = &hw->cls_shadow->flow_info[index];

	/* copy each populated entry type: dflt/vlan/dscp/rss1/rss2 */
	if (flow_info->flow_entry_dflt) {
		mv_pp2x_cls_flow_read(hw, flow_info->flow_entry_dflt, &fe);
		fe.index = flow_start++;
		mv_pp2x_cls_flow_write(hw, &fe);
	}
	if (flow_info->flow_entry_vlan) {
		mv_pp2x_cls_flow_read(hw, flow_info->flow_entry_vlan, &fe);
		fe.index = flow_start++;
		mv_pp2x_cls_flow_write(hw, &fe);
	}
	if (flow_info->flow_entry_dscp) {
		mv_pp2x_cls_flow_read(hw, flow_info->flow_entry_dscp, &fe);
		fe.index = flow_start++;
		mv_pp2x_cls_flow_write(hw, &fe);
	}
	if (flow_info->flow_entry_rss1) {
		mv_pp2x_cls_flow_read(hw, flow_info->flow_entry_rss1, &fe);
		fe.index = flow_start++;
		mv_pp2x_cls_flow_write(hw, &fe);
	}
	if (flow_info->flow_entry_rss2) {
		mv_pp2x_cls_flow_read(hw, flow_info->flow_entry_rss2, &fe);
		fe.index = flow_start++;
		mv_pp2x_cls_flow_write(hw, &fe);
	}

	*temp_flow_idx = hw->cls_shadow->flow_free_start;
}
+
+/* C2 rule and Qos table */
/* Write C2 TCAM/SRAM entry @c2 to HW at @index and mark it valid.
 * Returns 0 on success, -EINVAL on a NULL entry or out-of-range index.
 * NOTE(review): seq_attr is read by mv_pp2x_cls_c2_hw_read() but is not
 * written back here - confirm whether that is intentional.
 */
int mv_pp2x_cls_c2_hw_write(struct mv_pp2x_hw *hw, int index,
			    struct mv_pp2x_cls_c2_entry *c2)
{
	int tcm_idx;

	if (!c2 || index >= MVPP2_CLS_C2_TCAM_SIZE)
		return -EINVAL;

	c2->index = index;

	/* select the entry: write the index register */
	mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, index);

	/* write valid bit (inv = 0 means the entry is valid) */
	c2->inv = 0;
	mv_pp2x_write(hw, MVPP2_CLS2_TCAM_INV_REG,
		      ((c2->inv) << MVPP2_CLS2_TCAM_INV_INVALID_OFF));

	/* write all TCAM data words */
	for (tcm_idx = 0; tcm_idx < MVPP2_CLS_C2_TCAM_WORDS; tcm_idx++)
		mv_pp2x_write(hw, MVPP2_CLS2_TCAM_DATA_REG(tcm_idx),
			      c2->tcam.words[tcm_idx]);

	/* write action_tbl CLSC2_ACT_DATA */
	mv_pp2x_write(hw, MVPP2_CLS2_ACT_DATA_REG, c2->sram.regs.action_tbl);

	/* write actions CLSC2_ACT */
	mv_pp2x_write(hw, MVPP2_CLS2_ACT_REG, c2->sram.regs.actions);

	/* write qos_attr CLSC2_ATTR0 */
	mv_pp2x_write(hw, MVPP2_CLS2_ACT_QOS_ATTR_REG, c2->sram.regs.qos_attr);

	/* write hwf_attr CLSC2_ATTR1 */
	mv_pp2x_write(hw, MVPP2_CLS2_ACT_HWF_ATTR_REG, c2->sram.regs.hwf_attr);

	/* write rss_attr CLSC2_ATTR2 */
	mv_pp2x_write(hw, MVPP2_CLS2_ACT_DUP_ATTR_REG, c2->sram.regs.rss_attr);

	return 0;
}
EXPORT_SYMBOL(mv_pp2x_cls_c2_hw_write);
+
+int mv_pp2x_cls_c2_qos_hw_write(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_cls_c2_qos_entry *qos)
+{
+ unsigned int reg_val = 0;
+
+ if (!qos || qos->tbl_sel > MVPP2_QOS_TBL_SEL_DSCP)
+ return -EINVAL;
+
+ if (qos->tbl_sel == MVPP2_QOS_TBL_SEL_DSCP) {
+ /*dscp*/
+ if (qos->tbl_id >= MVPP2_QOS_TBL_NUM_DSCP ||
+ qos->tbl_line >= MVPP2_QOS_TBL_LINE_NUM_DSCP)
+ return -EINVAL;
+ } else {
+ /*pri*/
+ if (qos->tbl_id >= MVPP2_QOS_TBL_NUM_PRI ||
+ qos->tbl_line >= MVPP2_QOS_TBL_LINE_NUM_PRI)
+ return -EINVAL;
+ }
+ /* write index reg */
+ reg_val |= (qos->tbl_line << MVPP2_CLS2_DSCP_PRI_INDEX_LINE_OFF);
+ reg_val |= (qos->tbl_sel << MVPP2_CLS2_DSCP_PRI_INDEX_SEL_OFF);
+ reg_val |= (qos->tbl_id << MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF);
+ mv_pp2x_write(hw, MVPP2_CLS2_DSCP_PRI_INDEX_REG, reg_val);
+
+ /* write data reg*/
+ mv_pp2x_write(hw, MVPP2_CLS2_QOS_TBL_REG, qos->data);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_qos_hw_write);
+
+/* Invalidate one C2 TCAM entry: select the index, set the invalid bit,
+ * then issue a dummy data-word write to commit. Returns 0 or -EINVAL.
+ */
+int mv_pp2x_cls_c2_hw_inv(struct mv_pp2x_hw *hw, int index)
+{
+ if (!hw || index >= MVPP2_CLS_C2_TCAM_SIZE)
+ return -EINVAL;
+
+ /* write index reg */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, index);
+
+ /* set invalid bit*/
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_INV_REG, (1 <<
+ MVPP2_CLS2_TCAM_INV_INVALID_OFF));
+
+ /* trigger: writing TCAM data word 4 commits the invalidation.
+ * NOTE(review): 4 is presumably the last TCAM word
+ * (MVPP2_CLS_C2_TCAM_WORDS - 1) -- confirm and use the macro.
+ */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_DATA_REG(4), 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_hw_inv);
+
+/* Invalidate every C2 TCAM entry (hw is not NULL-checked here; each
+ * per-entry call validates it).
+ */
+void mv_pp2x_cls_c2_hw_inv_all(struct mv_pp2x_hw *hw)
+{
+ int index;
+
+ for (index = 0; index < MVPP2_CLS_C2_TCAM_SIZE; index++)
+ mv_pp2x_cls_c2_hw_inv(hw, index);
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_hw_inv_all);
+
+/* Zero every line of every DSCP and PRI QoS table in hardware by
+ * writing an all-zero entry through mv_pp2x_cls_c2_qos_hw_write().
+ */
+static void mv_pp2x_cls_c2_qos_hw_clear_all(struct mv_pp2x_hw *hw)
+{
+ struct mv_pp2x_cls_c2_qos_entry qos;
+
+ memset(&qos, 0, sizeof(struct mv_pp2x_cls_c2_qos_entry));
+
+ /* clear DSCP tables */
+ qos.tbl_sel = MVPP2_QOS_TBL_SEL_DSCP;
+ for (qos.tbl_id = 0; qos.tbl_id < MVPP2_QOS_TBL_NUM_DSCP;
+ qos.tbl_id++) {
+ for (qos.tbl_line = 0; qos.tbl_line <
+ MVPP2_QOS_TBL_LINE_NUM_DSCP; qos.tbl_line++) {
+ mv_pp2x_cls_c2_qos_hw_write(hw, &qos);
+ }
+ }
+
+ /* clear PRIO tables */
+ qos.tbl_sel = MVPP2_QOS_TBL_SEL_PRI;
+ for (qos.tbl_id = 0; qos.tbl_id <
+ MVPP2_QOS_TBL_NUM_PRI; qos.tbl_id++)
+ for (qos.tbl_line = 0; qos.tbl_line <
+ MVPP2_QOS_TBL_LINE_NUM_PRI; qos.tbl_line++) {
+ mv_pp2x_cls_c2_qos_hw_write(hw, &qos);
+ }
+}
+
+/* Record the QoS table selection (DSCP vs PRI) and table id in the C2
+ * SW entry's action_tbl word. Uses MVPP2_QOS_TBL_SEL_DSCP instead of
+ * the bare literal 1, for consistency with mv_pp2x_cls_c2_qos_hw_write.
+ * Returns 0, or -EINVAL on NULL entry / bad selector / bad table id.
+ */
+int mv_pp2x_cls_c2_qos_tbl_set(struct mv_pp2x_cls_c2_entry *c2,
+ int tbl_id, int tbl_sel)
+{
+ if (!c2 || tbl_sel > MVPP2_QOS_TBL_SEL_DSCP)
+ return -EINVAL;
+
+ if (tbl_sel == MVPP2_QOS_TBL_SEL_DSCP) {
+ /* DSCP table id range */
+ if (tbl_id >= MVPP2_QOS_TBL_NUM_DSCP)
+ return -EINVAL;
+ } else {
+ /* PRI table id range */
+ if (tbl_id >= MVPP2_QOS_TBL_NUM_PRI)
+ return -EINVAL;
+ }
+ c2->sram.regs.action_tbl = (tbl_id <<
+ MVPP2_CLS2_ACT_DATA_TBL_ID_OFF) |
+ (tbl_sel << MVPP2_CLS2_ACT_DATA_TBL_SEL_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_qos_tbl_set);
+
+/* Set the color action command in the C2 SW entry, and select whether
+ * the color comes from the QoS table (from == 1) or the action table.
+ * Returns 0, or -EINVAL on NULL entry / out-of-range command.
+ */
+int mv_pp2x_cls_c2_color_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int from)
+{
+ if (!c2 || cmd > MVPP2_COLOR_ACTION_TYPE_RED_LOCK)
+ return -EINVAL;
+
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_COLOR_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_COLOR_OFF);
+
+ if (from == 1)
+ c2->sram.regs.action_tbl |= (1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_COLOR_OFF);
+ else
+ c2->sram.regs.action_tbl &= ~(1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_COLOR_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_color_set);
+
+/* Set the priority (pbit) action: command, the modify-value used when
+ * the value does not come from the QoS table, and the source select
+ * (from == 1 -> QoS table, else action table).
+ * Returns 0, or -EINVAL on NULL entry / bad command / bad priority.
+ */
+int mv_pp2x_cls_c2_prio_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int prio, int from)
+{
+ if (!c2 || cmd > MVPP2_ACTION_TYPE_UPDT_LOCK ||
+ prio >= MVPP2_QOS_TBL_LINE_NUM_PRI)
+ return -EINVAL;
+
+ /*set command*/
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_PRI_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_PRI_OFF);
+
+ /*set modify priority value*/
+ c2->sram.regs.qos_attr &= ~MVPP2_CLS2_ACT_QOS_ATTR_PRI_MASK;
+ c2->sram.regs.qos_attr |= ((prio << MVPP2_CLS2_ACT_QOS_ATTR_PRI_OFF) &
+ MVPP2_CLS2_ACT_QOS_ATTR_PRI_MASK);
+
+ if (from == 1)
+ c2->sram.regs.action_tbl |= (1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_PRI_DSCP_OFF);
+ else
+ c2->sram.regs.action_tbl &= ~(1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_PRI_DSCP_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_prio_set);
+
+/* Set the DSCP action: command, modify-value and source select.
+ * Same structure as mv_pp2x_cls_c2_prio_set; both share the
+ * TBL_PRI_DSCP source-select bit in action_tbl.
+ */
+int mv_pp2x_cls_c2_dscp_set(struct mv_pp2x_cls_c2_entry *c2,
+ int cmd, int dscp, int from)
+{
+ if (!c2 || cmd > MVPP2_ACTION_TYPE_UPDT_LOCK ||
+ dscp >= MVPP2_QOS_TBL_LINE_NUM_DSCP)
+ return -EINVAL;
+
+ /*set command*/
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_DSCP_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_DSCP_OFF);
+
+ /*set modify DSCP value*/
+ c2->sram.regs.qos_attr &= ~MVPP2_CLS2_ACT_QOS_ATTR_DSCP_MASK;
+ c2->sram.regs.qos_attr |= ((dscp <<
+ MVPP2_CLS2_ACT_QOS_ATTR_DSCP_OFF) &
+ MVPP2_CLS2_ACT_QOS_ATTR_DSCP_MASK);
+
+ if (from == 1)
+ c2->sram.regs.action_tbl |= (1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_PRI_DSCP_OFF);
+ else
+ c2->sram.regs.action_tbl &= ~(1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_PRI_DSCP_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_dscp_set);
+
+/* Set the low queue-number action: command, the modify-value (low part
+ * of the physical queue number) and source select (from == 1 -> QoS
+ * table, else action table).
+ * Returns 0, or -EINVAL on NULL entry / bad command / queue value that
+ * does not fit in the QL bit field.
+ */
+int mv_pp2x_cls_c2_queue_low_set(struct mv_pp2x_cls_c2_entry *c2,
+ int cmd, int queue, int from)
+{
+ if (!c2 || cmd > MVPP2_ACTION_TYPE_UPDT_LOCK ||
+ queue >= (1 << MVPP2_CLS2_ACT_QOS_ATTR_QL_BITS))
+ return -EINVAL;
+
+ /*set command*/
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_QL_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_QL_OFF);
+
+ /*set modify Low queue value*/
+ c2->sram.regs.qos_attr &= ~MVPP2_CLS2_ACT_QOS_ATTR_QL_MASK;
+ c2->sram.regs.qos_attr |= ((queue <<
+ MVPP2_CLS2_ACT_QOS_ATTR_QL_OFF) &
+ MVPP2_CLS2_ACT_QOS_ATTR_QL_MASK);
+
+ if (from == 1)
+ c2->sram.regs.action_tbl |= (1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_LOW_Q_OFF);
+ else
+ c2->sram.regs.action_tbl &= ~(1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_LOW_Q_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_queue_low_set);
+
+/* Set the high queue-number action: same structure as the low-queue
+ * setter but for the QH bit field and HIGH_Q source-select bit.
+ */
+int mv_pp2x_cls_c2_queue_high_set(struct mv_pp2x_cls_c2_entry *c2,
+ int cmd, int queue, int from)
+{
+ if (!c2 || cmd > MVPP2_ACTION_TYPE_UPDT_LOCK ||
+ queue >= (1 << MVPP2_CLS2_ACT_QOS_ATTR_QH_BITS))
+ return -EINVAL;
+
+ /*set command*/
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_QH_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_QH_OFF);
+
+ /*set modify High queue value*/
+ c2->sram.regs.qos_attr &= ~MVPP2_CLS2_ACT_QOS_ATTR_QH_MASK;
+ c2->sram.regs.qos_attr |= ((queue <<
+ MVPP2_CLS2_ACT_QOS_ATTR_QH_OFF) &
+ MVPP2_CLS2_ACT_QOS_ATTR_QH_MASK);
+
+ if (from == 1)
+ c2->sram.regs.action_tbl |= (1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_HIGH_Q_OFF);
+ else
+ c2->sram.regs.action_tbl &= ~(1 <<
+ MVPP2_CLS2_ACT_DATA_TBL_HIGH_Q_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_queue_high_set);
+
+/* Set the forwarding action command in the C2 SW entry.
+ * Returns 0, or -EINVAL on NULL entry / out-of-range command.
+ */
+int mv_pp2x_cls_c2_forward_set(struct mv_pp2x_cls_c2_entry *c2, int cmd)
+{
+ if (!c2 || cmd > MVPP2_FRWD_ACTION_TYPE_HWF_LOW_LATENCY_LOCK)
+ return -EINVAL;
+
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_FRWD_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_FRWD_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_forward_set);
+
+/* Set the RSS action command and the RSS-enable value in the C2 SW
+ * entry. Returns 0, or -EINVAL on NULL entry / bad command / rss_en
+ * that does not fit in its bit field.
+ */
+int mv_pp2x_cls_c2_rss_set(struct mv_pp2x_cls_c2_entry *c2, int cmd, int rss_en)
+{
+ if (!c2 || cmd > MVPP2_ACTION_TYPE_UPDT_LOCK || rss_en >=
+ (1 << MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_BITS))
+ return -EINVAL;
+
+ c2->sram.regs.actions &= ~MVPP2_CLS2_ACT_RSS_MASK;
+ c2->sram.regs.actions |= (cmd << MVPP2_CLS2_ACT_RSS_OFF);
+
+ c2->sram.regs.rss_attr &= ~MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_MASK;
+ c2->sram.regs.rss_attr |= (rss_en <<
+ MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_rss_set);
+
+/* Enable or disable the Flow-ID bit in the C2 SW entry's actions word.
+ * Returns 0, or -EINVAL on a NULL entry.
+ */
+int mv_pp2x_cls_c2_flow_id_en(struct mv_pp2x_cls_c2_entry *c2, int flowid_en)
+{
+ if (!c2)
+ return -EINVAL;
+
+ /*set Flow ID enable or disable*/
+ if (flowid_en)
+ c2->sram.regs.actions |= (1 << MVPP2_CLS2_ACT_FLD_EN_OFF);
+ else
+ c2->sram.regs.actions &= ~(1 << MVPP2_CLS2_ACT_FLD_EN_OFF);
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_flow_id_en);
+
+/* Set one TCAM HEK byte and its enable (mask) byte in the C2 SW entry.
+ * offs is the HEK byte offset; TCAM_DATA_BYTE/TCAM_DATA_MASK map it to
+ * the packed tcam.bytes[] layout. Returns 0 or -EINVAL.
+ */
+int mv_pp2x_cls_c2_tcam_byte_set(struct mv_pp2x_cls_c2_entry *c2,
+ unsigned int offs, unsigned char byte,
+ unsigned char enable)
+{
+ if (!c2 || offs >= MVPP2_CLS_C2_TCAM_DATA_BYTES)
+ return -EINVAL;
+
+ c2->tcam.bytes[TCAM_DATA_BYTE(offs)] = byte;
+ c2->tcam.bytes[TCAM_DATA_MASK(offs)] = enable;
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_tcam_byte_set);
+
+/* Set the queue number field of a QoS table SW entry.
+ * Returns 0, or -EINVAL if queue does not fit in the field.
+ */
+int mv_pp2x_cls_c2_qos_queue_set(struct mv_pp2x_cls_c2_qos_entry *qos,
+ u8 queue)
+{
+ if (!qos || queue >= (1 << MVPP2_CLS2_QOS_TBL_QUEUENUM_BITS))
+ return -EINVAL;
+
+ qos->data &= ~MVPP2_CLS2_QOS_TBL_QUEUENUM_MASK;
+ qos->data |= (((u32)queue) << MVPP2_CLS2_QOS_TBL_QUEUENUM_OFF);
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_qos_queue_set);
+
+/* Build a complete C2 SW entry from c2_add_entry (QoS table select,
+ * color/prio/DSCP/queue/forward/RSS/flow-id actions, then the HEK
+ * match bytes for lookup type, port type and port id) and write it to
+ * TCAM index c2_hw_idx. Returns 0, or the first sub-setter's error.
+ */
+static int mv_pp2x_c2_tcam_set(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_c2_add_entry *c2_add_entry,
+ unsigned int c2_hw_idx)
+{
+ int ret_code;
+ struct mv_pp2x_cls_c2_entry c2_entry;
+ int hek_offs;
+ unsigned char hek_byte[MVPP2_CLS_C2_HEK_OFF_MAX],
+ hek_byte_mask[MVPP2_CLS_C2_HEK_OFF_MAX];
+
+ if (!c2_add_entry || !hw || c2_hw_idx >= MVPP2_CLS_C2_TCAM_SIZE)
+ return -EINVAL;
+
+ /* Clear C2 sw data */
+ memset(&c2_entry, 0, sizeof(struct mv_pp2x_cls_c2_entry));
+
+ /* Set QOS table, selection and ID */
+ ret_code = mv_pp2x_cls_c2_qos_tbl_set(&c2_entry,
+ c2_add_entry->qos_info.qos_tbl_index,
+ c2_add_entry->qos_info.qos_tbl_type);
+ if (ret_code)
+ return ret_code;
+
+ /* Set color, cmd and source */
+ ret_code = mv_pp2x_cls_c2_color_set(&c2_entry,
+ c2_add_entry->action.color_act,
+ c2_add_entry->qos_info.color_src);
+ if (ret_code)
+ return ret_code;
+
+ /* Set priority(pbit), cmd, value(not from qos table) and source */
+ ret_code = mv_pp2x_cls_c2_prio_set(&c2_entry,
+ c2_add_entry->action.pri_act,
+ c2_add_entry->qos_value.pri,
+ c2_add_entry->qos_info.pri_dscp_src);
+ if (ret_code)
+ return ret_code;
+
+ /* Set DSCP, cmd, value(not from qos table) and source */
+ ret_code = mv_pp2x_cls_c2_dscp_set(&c2_entry,
+ c2_add_entry->action.dscp_act,
+ c2_add_entry->qos_value.dscp,
+ c2_add_entry->qos_info.pri_dscp_src);
+ if (ret_code)
+ return ret_code;
+
+ /* Set queue low, cmd, value, and source */
+ ret_code = mv_pp2x_cls_c2_queue_low_set(&c2_entry,
+ c2_add_entry->action.q_low_act,
+ c2_add_entry->qos_value.q_low,
+ c2_add_entry->qos_info.q_low_src);
+ if (ret_code)
+ return ret_code;
+
+ /* Set queue high, cmd, value and source */
+ ret_code = mv_pp2x_cls_c2_queue_high_set(&c2_entry,
+ c2_add_entry->action.q_high_act,
+ c2_add_entry->qos_value.q_high,
+ c2_add_entry->qos_info.q_high_src);
+ if (ret_code)
+ return ret_code;
+
+ /* Set forward */
+ ret_code = mv_pp2x_cls_c2_forward_set(&c2_entry,
+ c2_add_entry->action.frwd_act);
+ if (ret_code)
+ return ret_code;
+
+ /* Set RSS */
+ ret_code = mv_pp2x_cls_c2_rss_set(&c2_entry,
+ c2_add_entry->action.rss_act,
+ c2_add_entry->rss_en);
+ if (ret_code)
+ return ret_code;
+
+ /* Set flowID(not for multicast) */
+ ret_code = mv_pp2x_cls_c2_flow_id_en(&c2_entry,
+ c2_add_entry->action.flowid_act);
+ if (ret_code)
+ return ret_code;
+
+ /* Set C2 HEK */
+ memset(hek_byte, 0, MVPP2_CLS_C2_HEK_OFF_MAX);
+ memset(hek_byte_mask, 0, MVPP2_CLS_C2_HEK_OFF_MAX);
+
+ /* HEK offs 8, lookup type, port type */
+ hek_byte[MVPP2_CLS_C2_HEK_OFF_LKP_PORT_TYPE] =
+ (c2_add_entry->port.port_type <<
+ MVPP2_CLS_C2_HEK_PORT_TYPE_OFFS) |
+ (c2_add_entry->lkp_type <<
+ MVPP2_CLS_C2_HEK_LKP_TYPE_OFFS);
+ hek_byte_mask[MVPP2_CLS_C2_HEK_OFF_LKP_PORT_TYPE] =
+ MVPP2_CLS_C2_HEK_PORT_TYPE_MASK |
+ ((c2_add_entry->lkp_type_mask <<
+ MVPP2_CLS_C2_HEK_LKP_TYPE_OFFS) &
+ MVPP2_CLS_C2_HEK_LKP_TYPE_MASK);
+ /* HEK offs 9, port ID */
+ hek_byte[MVPP2_CLS_C2_HEK_OFF_PORT_ID] =
+ c2_add_entry->port.port_value;
+ hek_byte_mask[MVPP2_CLS_C2_HEK_OFF_PORT_ID] =
+ c2_add_entry->port.port_mask;
+
+ /* Copy HEK bytes (highest offset first) into the TCAM SW entry */
+ for (hek_offs = MVPP2_CLS_C2_HEK_OFF_PORT_ID; hek_offs >=
+ MVPP2_CLS_C2_HEK_OFF_BYTE0; hek_offs--) {
+ ret_code = mv_pp2x_cls_c2_tcam_byte_set(&c2_entry,
+ hek_offs,
+ hek_byte[hek_offs],
+ hek_byte_mask[hek_offs]);
+ if (ret_code)
+ return ret_code;
+ }
+
+ /* Write C2 entry data to HW */
+ ret_code = mv_pp2x_cls_c2_hw_write(hw, c2_hw_idx, &c2_entry);
+ if (ret_code)
+ return ret_code;
+
+ return 0;
+}
+
+/* C2 TCAM init: invalidate all C2/QoS entries, enable the C2 engine,
+ * and allocate + initialize the C2 shadow bookkeeping structure.
+ * Returns 0, or -ENOMEM if the shadow allocation fails.
+ */
+int mv_pp2x_c2_init(struct platform_device *pdev, struct mv_pp2x_hw *hw)
+{
+ int i;
+
+ /* Invalid all C2 and QoS entries */
+ mv_pp2x_cls_c2_hw_inv_all(hw);
+
+ mv_pp2x_cls_c2_qos_hw_clear_all(hw);
+
+ /* Set CLSC2_TCAM_CTRL to enable C2, or C2 does not work */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_CTRL_REG,
+ MVPP2_CLS2_TCAM_CTRL_EN_MASK);
+
+ /* Allocate mem for c2 shadow; devm_kzalloc is the idiomatic call
+ * for a single zeroed object (was devm_kcalloc(dev, 1, ...)).
+ */
+ hw->c2_shadow = devm_kzalloc(&pdev->dev,
+ sizeof(struct mv_pp2x_c2_shadow),
+ GFP_KERNEL);
+ if (!hw->c2_shadow)
+ return -ENOMEM;
+
+ /* Init the rule idx to invalid value (MVPP2_CLS_C2_TCAM_SIZE means
+ * "no rule allocated").
+ * NOTE(review): the bound 8 is presumably the size of
+ * rule_idx_info[] (max port count) -- confirm against mv_pp2x.h and
+ * replace with the named constant / ARRAY_SIZE().
+ */
+ for (i = 0; i < 8; i++) {
+ hw->c2_shadow->rule_idx_info[i].vlan_pri_idx =
+ MVPP2_CLS_C2_TCAM_SIZE;
+ hw->c2_shadow->rule_idx_info[i].dscp_pri_idx =
+ MVPP2_CLS_C2_TCAM_SIZE;
+ hw->c2_shadow->rule_idx_info[i].default_rule_idx =
+ MVPP2_CLS_C2_TCAM_SIZE;
+ }
+ hw->c2_shadow->c2_tcam_free_start = 0;
+
+ return 0;
+}
+
+/* Add (or update) a per-port C2 TCAM rule for the given lookup type.
+ * A new rule takes the next free TCAM index from the shadow; an
+ * existing rule (index already recorded in the shadow) is rewritten in
+ * place. Returns 0, or a negative errno on bad arguments / HW failure.
+ */
+static int mv_pp2x_c2_rule_add(struct mv_pp2x_port *port,
+ struct mv_pp2x_c2_add_entry *c2_add_entry)
+{
+ int ret, lkp_type, c2_index = 0;
+ bool first_free_update = false;
+ struct mv_pp2x_c2_rule_idx *rule_idx;
+
+ /* Validate arguments before touching port: the original code
+ * dereferenced port->priv ahead of this NULL check.
+ */
+ if (!port || !c2_add_entry)
+ return -EINVAL;
+
+ rule_idx = &port->priv->hw.c2_shadow->rule_idx_info[port->id];
+
+ lkp_type = c2_add_entry->lkp_type;
+ /* Write rule in C2 TCAM */
+ if (lkp_type == MVPP2_CLS_LKP_VLAN_PRI) {
+ if (rule_idx->vlan_pri_idx == MVPP2_CLS_C2_TCAM_SIZE) {
+ /* If the C2 rule is new, apply a free c2 rule index */
+ c2_index =
+ port->priv->hw.c2_shadow->c2_tcam_free_start;
+ first_free_update = true;
+ } else {
+ /* If the C2 rule is exist one,
+ * take the C2 index from shadow
+ */
+ c2_index = rule_idx->vlan_pri_idx;
+ first_free_update = false;
+ }
+ } else if (lkp_type == MVPP2_CLS_LKP_DSCP_PRI) {
+ if (rule_idx->dscp_pri_idx == MVPP2_CLS_C2_TCAM_SIZE) {
+ c2_index =
+ port->priv->hw.c2_shadow->c2_tcam_free_start;
+ first_free_update = true;
+ } else {
+ c2_index = rule_idx->dscp_pri_idx;
+ first_free_update = false;
+ }
+ } else if (lkp_type == MVPP2_CLS_LKP_DEFAULT) {
+ if (rule_idx->default_rule_idx == MVPP2_CLS_C2_TCAM_SIZE) {
+ c2_index =
+ port->priv->hw.c2_shadow->c2_tcam_free_start;
+ first_free_update = true;
+ } else {
+ c2_index = rule_idx->default_rule_idx;
+ first_free_update = false;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Write C2 TCAM HW */
+ ret = mv_pp2x_c2_tcam_set(&port->priv->hw, c2_add_entry, c2_index);
+ if (ret)
+ return ret;
+
+ /* Update first free rule */
+ if (first_free_update)
+ port->priv->hw.c2_shadow->c2_tcam_free_start++;
+
+ /* Update shadow */
+ if (lkp_type == MVPP2_CLS_LKP_VLAN_PRI)
+ rule_idx->vlan_pri_idx = c2_index;
+ else if (lkp_type == MVPP2_CLS_LKP_DSCP_PRI)
+ rule_idx->dscp_pri_idx = c2_index;
+ else if (lkp_type == MVPP2_CLS_LKP_DEFAULT)
+ rule_idx->default_rule_idx = c2_index;
+
+ return 0;
+}
+
+/* Fill one QoS table (PRI or DSCP flavor) so every line maps its
+ * cos-value to a physical queue: queue = start_queue + per-port cos
+ * queue from mv_pp2x_cosval_queue_map().
+ */
+static void mv_pp2x_cls_c2_qos_tbl_fill(struct mv_pp2x_port *port,
+ u8 tbl_sel, u8 tbl_id, u8 start_queue)
+{
+ struct mv_pp2x_cls_c2_qos_entry qos_entry;
+ u32 pri, line_num;
+ u8 cos_value, cos_queue, queue;
+
+ if (tbl_sel == MVPP2_QOS_TBL_SEL_PRI)
+ line_num = MVPP2_QOS_TBL_LINE_NUM_PRI;
+ else
+ line_num = MVPP2_QOS_TBL_LINE_NUM_DSCP;
+
+ memset(&qos_entry, 0, sizeof(struct mv_pp2x_cls_c2_qos_entry));
+ qos_entry.tbl_id = tbl_id;
+ qos_entry.tbl_sel = tbl_sel;
+
+ /* Fill the QoS dscp/pbit table */
+ for (pri = 0; pri < line_num; pri++) {
+ /* cos_value equal to dscp/8 or pbit value */
+ cos_value = ((tbl_sel == MVPP2_QOS_TBL_SEL_PRI) ?
+ pri : (pri / 8));
+ /* each nibble of pri_map stands for a cos-value,
+ * nibble value is the queue
+ */
+ cos_queue = mv_pp2x_cosval_queue_map(port, cos_value);
+ qos_entry.tbl_line = pri;
+ /* map cos queue to physical queue */
+ /* Physical queue contains 2 parts: port ID and CPU ID,
+ * CPU ID will be used in RSS
+ */
+ queue = start_queue + cos_queue;
+ mv_pp2x_cls_c2_qos_queue_set(&qos_entry, queue);
+ mv_pp2x_cls_c2_qos_hw_write(&port->priv->hw, &qos_entry);
+ }
+}
+
+/* Initialize the common part of a C2 add-entry: match on the physical
+ * port bit and lookup type, and set the default action commands
+ * (queue/RSS update-locked, forward to SW, color/prio/DSCP untouched).
+ */
+static void mv_pp2x_cls_c2_entry_common_set(struct mv_pp2x_c2_add_entry *entry,
+ u8 port,
+ u8 lkp_type)
+{
+ memset(entry, 0, sizeof(struct mv_pp2x_c2_add_entry));
+ /* Port info: match one physical port as a bitmap bit */
+ entry->port.port_type = MVPP2_SRC_PORT_TYPE_PHY;
+ entry->port.port_value = (1 << port);
+ entry->port.port_mask = 0xff;
+ /* Lookup type */
+ entry->lkp_type = lkp_type;
+ entry->lkp_type_mask = 0x3F;
+ /* Action info */
+ entry->action.color_act = MVPP2_COLOR_ACTION_TYPE_NO_UPDT_LOCK;
+ entry->action.pri_act = MVPP2_ACTION_TYPE_NO_UPDT_LOCK;
+ entry->action.dscp_act = MVPP2_ACTION_TYPE_NO_UPDT_LOCK;
+ entry->action.q_low_act = MVPP2_ACTION_TYPE_UPDT_LOCK;
+ entry->action.q_high_act = MVPP2_ACTION_TYPE_UPDT_LOCK;
+ entry->action.rss_act = MVPP2_ACTION_TYPE_UPDT_LOCK;
+ /* To CPU */
+ entry->action.frwd_act = MVPP2_FRWD_ACTION_TYPE_SWF_LOCK;
+}
+
+/* C2 rule set: install one C2 rule per lookup type (VLAN_PRI, DSCP_PRI
+ * and DEFAULT) for the port. The PRI/DSCP rules take their queue from
+ * the port's QoS table (which is filled here); the DEFAULT rule takes
+ * it from the action table. Returns 0 or the first rule-add error.
+ */
+int mv_pp2x_cls_c2_rule_set(struct mv_pp2x_port *port, u8 start_queue)
+{
+ struct mv_pp2x_c2_add_entry c2_init_entry;
+ int ret;
+ u8 cos_value, cos_queue, queue, lkp_type;
+
+ /* QoS of pbit rule */
+ for (lkp_type = MVPP2_CLS_LKP_VLAN_PRI; lkp_type <=
+ MVPP2_CLS_LKP_DEFAULT; lkp_type++) {
+ /* Set common part of C2 rule */
+ mv_pp2x_cls_c2_entry_common_set(&c2_init_entry,
+ port->id,
+ lkp_type);
+
+ /* QoS info */
+ if (lkp_type != MVPP2_CLS_LKP_DEFAULT) {
+ /* Initialized defensively: only the VLAN_PRI and
+ * DSCP_PRI branches below assign tbl_sel, so any
+ * other non-DEFAULT lkp_type would otherwise pass
+ * an uninitialized value to the table fill.
+ */
+ u8 tbl_sel = MVPP2_QOS_TBL_SEL_PRI;
+
+ /* QoS info from C2 QoS table */
+ /* Set the QoS table index equal to port ID */
+ c2_init_entry.qos_info.qos_tbl_index = port->id;
+ c2_init_entry.qos_info.q_low_src =
+ MVPP2_QOS_SRC_DSCP_PBIT_TBL;
+ c2_init_entry.qos_info.q_high_src =
+ MVPP2_QOS_SRC_DSCP_PBIT_TBL;
+ if (lkp_type == MVPP2_CLS_LKP_VLAN_PRI) {
+ c2_init_entry.qos_info.qos_tbl_type =
+ MVPP2_QOS_TBL_SEL_PRI;
+ tbl_sel = MVPP2_QOS_TBL_SEL_PRI;
+ } else if (lkp_type == MVPP2_CLS_LKP_DSCP_PRI) {
+ c2_init_entry.qos_info.qos_tbl_type =
+ MVPP2_QOS_TBL_SEL_DSCP;
+ tbl_sel = MVPP2_QOS_TBL_SEL_DSCP;
+ }
+ /* Fill qos table */
+ mv_pp2x_cls_c2_qos_tbl_fill(port,
+ tbl_sel,
+ port->id,
+ start_queue);
+ } else {
+ /* QoS info from C2 action table */
+ c2_init_entry.qos_info.q_low_src =
+ MVPP2_QOS_SRC_ACTION_TBL;
+ c2_init_entry.qos_info.q_high_src =
+ MVPP2_QOS_SRC_ACTION_TBL;
+ cos_value = port->cos_cfg.default_cos;
+ cos_queue = mv_pp2x_cosval_queue_map(port, cos_value);
+ /* map to physical queue */
+ /* Physical queue contains 2 parts: port ID and CPU ID,
+ * CPU ID will be used in RSS
+ */
+ queue = start_queue + cos_queue;
+ c2_init_entry.qos_value.q_low = ((u16)queue) &
+ ((1 << MVPP2_CLS2_ACT_QOS_ATTR_QL_BITS) - 1);
+ c2_init_entry.qos_value.q_high = ((u16)queue) >>
+ MVPP2_CLS2_ACT_QOS_ATTR_QL_BITS;
+ }
+ /* RSS En in PP22 */
+ c2_init_entry.rss_en = port->rss_cfg.rss_en;
+
+ /* Add rule to C2 TCAM */
+ ret = mv_pp2x_c2_rule_add(port, &c2_init_entry);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp2x_cls_c2_rule_set);
+
+/* Read the combined (high|low) queue number from the C2 rule at
+ * rule_idx, via the indexed CLSC2_ATTR0 register.
+ */
+u8 mv_pp2x_cls_c2_rule_queue_get(struct mv_pp2x_hw *hw, u32 rule_idx)
+{
+ u32 reg_val;
+ u8 queue;
+
+ /* Write index reg */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, rule_idx);
+
+ /* Read Reg CLSC2_ATTR0 */
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS2_ACT_QOS_ATTR_REG);
+ /* QH and QL are adjacent fields; extract both as one value */
+ queue = (reg_val & (MVPP2_CLS2_ACT_QOS_ATTR_QL_MASK |
+ MVPP2_CLS2_ACT_QOS_ATTR_QH_MASK)) >>
+ MVPP2_CLS2_ACT_QOS_ATTR_QL_OFF;
+ return queue;
+}
+
+/* Write the combined (high|low) queue number into the C2 rule at
+ * rule_idx, read-modify-writing the indexed CLSC2_ATTR0 register.
+ */
+void mv_pp2x_cls_c2_rule_queue_set(struct mv_pp2x_hw *hw, u32 rule_idx,
+ u8 queue)
+{
+ u32 reg_val;
+
+ /* Write index reg */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, rule_idx);
+
+ /* Read Reg CLSC2_ATTR0 */
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS2_ACT_QOS_ATTR_REG);
+ /* Update Value */
+ reg_val &= (~(MVPP2_CLS2_ACT_QOS_ATTR_QL_MASK |
+ MVPP2_CLS2_ACT_QOS_ATTR_QH_MASK));
+ reg_val |= (((u32)queue) << MVPP2_CLS2_ACT_QOS_ATTR_QL_OFF);
+
+ /* Write Reg CLSC2_ATTR0 */
+ mv_pp2x_write(hw, MVPP2_CLS2_ACT_QOS_ATTR_REG, reg_val);
+}
+
+/* Read the queue number stored in one line of a PRI (pbit) QoS table,
+ * by programming the DSCP/PRI index register and reading the data reg.
+ */
+u8 mv_pp2x_cls_c2_pbit_tbl_queue_get(struct mv_pp2x_hw *hw, u8 tbl_id,
+ u8 tbl_line)
+{
+ u8 queue;
+ u32 reg_val = 0;
+
+ /* write index reg */
+ reg_val |= (tbl_line << MVPP2_CLS2_DSCP_PRI_INDEX_LINE_OFF);
+ reg_val |= (MVPP2_QOS_TBL_SEL_PRI <<
+ MVPP2_CLS2_DSCP_PRI_INDEX_SEL_OFF);
+ reg_val |= (tbl_id << MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF);
+ mv_pp2x_write(hw, MVPP2_CLS2_DSCP_PRI_INDEX_REG, reg_val);
+ /* Read Reg CLSC2_DSCP_PRI */
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS2_QOS_TBL_REG);
+ queue = (reg_val & MVPP2_CLS2_QOS_TBL_QUEUENUM_MASK) >>
+ MVPP2_CLS2_QOS_TBL_QUEUENUM_OFF;
+
+ return queue;
+}
+
+/* Write the queue number into one line of a PRI (pbit) QoS table,
+ * read-modify-writing the indexed data register.
+ */
+void mv_pp2x_cls_c2_pbit_tbl_queue_set(struct mv_pp2x_hw *hw,
+ u8 tbl_id, u8 tbl_line, u8 queue)
+{
+ u32 reg_val = 0;
+
+ /* write index reg */
+ reg_val |= (tbl_line << MVPP2_CLS2_DSCP_PRI_INDEX_LINE_OFF);
+ reg_val |=
+ (MVPP2_QOS_TBL_SEL_PRI << MVPP2_CLS2_DSCP_PRI_INDEX_SEL_OFF);
+ reg_val |= (tbl_id << MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF);
+ mv_pp2x_write(hw, MVPP2_CLS2_DSCP_PRI_INDEX_REG, reg_val);
+
+ /* Read Reg CLSC2_DSCP_PRI */
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS2_QOS_TBL_REG);
+ reg_val &= (~MVPP2_CLS2_QOS_TBL_QUEUENUM_MASK);
+ reg_val |= (((u32)queue) << MVPP2_CLS2_QOS_TBL_QUEUENUM_OFF);
+
+ /* Write Reg CLSC2_DSCP_PRI */
+ mv_pp2x_write(hw, MVPP2_CLS2_QOS_TBL_REG, reg_val);
+}
+
+/* RSS */
+/* The function will set rss table entry: either the rxq -> RSS-table
+ * pointer (ACCESS_POINTER) or one table line's rxq + hash width
+ * (ACCESS_TBL). Returns 0 or -EINVAL on out-of-range fields.
+ *
+ * NOTE(review): reg_val is not reset to 0 between the index write and
+ * the entry writes, so the entry registers are written with the index
+ * bits still set (only the entry field's own mask is cleared).
+ * Confirm the HW ignores the extra bits, or clear reg_val first.
+ */
+int mv_pp22_rss_tbl_entry_set(struct mv_pp2x_hw *hw,
+ struct mv_pp22_rss_entry *rss)
+{
+ unsigned int reg_val = 0;
+
+ if (!rss || rss->sel > MVPP22_RSS_ACCESS_TBL)
+ return -EINVAL;
+
+ if (rss->sel == MVPP22_RSS_ACCESS_POINTER) {
+ if (rss->u.pointer.rss_tbl_ptr >= MVPP22_RSS_TBL_NUM)
+ return -EINVAL;
+ /* Write index */
+ reg_val |= rss->u.pointer.rxq_idx <<
+ MVPP22_RSS_IDX_RXQ_NUM_OFF;
+ mv_pp2x_write(hw, MVPP22_RSS_IDX_REG, reg_val);
+ /* Write entry */
+ reg_val &= (~MVPP22_RSS_RXQ2RSS_TBL_POINT_MASK);
+ reg_val |= rss->u.pointer.rss_tbl_ptr <<
+ MVPP22_RSS_RXQ2RSS_TBL_POINT_OFF;
+ mv_pp2x_write(hw, MVPP22_RSS_RXQ2RSS_TBL_REG, reg_val);
+ } else if (rss->sel == MVPP22_RSS_ACCESS_TBL) {
+ if (rss->u.entry.tbl_id >= MVPP22_RSS_TBL_NUM ||
+ rss->u.entry.tbl_line >= MVPP22_RSS_TBL_LINE_NUM ||
+ rss->u.entry.width >= MVPP22_RSS_WIDTH_MAX)
+ return -EINVAL;
+ /* Write index */
+ reg_val |= (rss->u.entry.tbl_line <<
+ MVPP22_RSS_IDX_ENTRY_NUM_OFF |
+ rss->u.entry.tbl_id << MVPP22_RSS_IDX_TBL_NUM_OFF);
+ mv_pp2x_write(hw, MVPP22_RSS_IDX_REG, reg_val);
+ /* Write entry */
+ reg_val &= (~MVPP22_RSS_TBL_ENTRY_MASK);
+ reg_val |= (rss->u.entry.rxq << MVPP22_RSS_TBL_ENTRY_OFF);
+ mv_pp2x_write(hw, MVPP22_RSS_TBL_ENTRY_REG, reg_val);
+ reg_val &= (~MVPP22_RSS_WIDTH_MASK);
+ reg_val |= (rss->u.entry.width << MVPP22_RSS_WIDTH_OFF);
+ mv_pp2x_write(hw, MVPP22_RSS_WIDTH_REG, reg_val);
+ }
+ return 0;
+}
+
+/* The function will get rss table entry: read back either the
+ * rxq -> RSS-table pointer or one table line's rxq and width.
+ * Returns 0 or -EINVAL. Note the POINTER path does not program the
+ * index register first; it reads whatever index is currently selected.
+ */
+int mv_pp22_rss_tbl_entry_get(struct mv_pp2x_hw *hw,
+ struct mv_pp22_rss_entry *rss)
+{
+ unsigned int reg_val = 0;
+
+ if (!rss || rss->sel > MVPP22_RSS_ACCESS_TBL)
+ return -EINVAL;
+
+ if (rss->sel == MVPP22_RSS_ACCESS_POINTER) {
+ /* Read entry */
+ rss->u.pointer.rss_tbl_ptr =
+ mv_pp2x_read(hw, MVPP22_RSS_RXQ2RSS_TBL_REG) &
+ MVPP22_RSS_RXQ2RSS_TBL_POINT_MASK;
+ } else if (rss->sel == MVPP22_RSS_ACCESS_TBL) {
+ if (rss->u.entry.tbl_id >= MVPP22_RSS_TBL_NUM ||
+ rss->u.entry.tbl_line >= MVPP22_RSS_TBL_LINE_NUM)
+ return -EINVAL;
+ /* Write index, then read the selected entry */
+ reg_val |= (rss->u.entry.tbl_line <<
+ MVPP22_RSS_IDX_ENTRY_NUM_OFF |
+ rss->u.entry.tbl_id <<
+ MVPP22_RSS_IDX_TBL_NUM_OFF);
+ mv_pp2x_write(hw, MVPP22_RSS_IDX_REG, reg_val);
+ /* Read entry */
+ rss->u.entry.rxq = mv_pp2x_read(hw,
+ MVPP22_RSS_TBL_ENTRY_REG) &
+ MVPP22_RSS_TBL_ENTRY_MASK;
+ rss->u.entry.width = mv_pp2x_read(hw,
+ MVPP22_RSS_WIDTH_REG) &
+ MVPP22_RSS_WIDTH_MASK;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(mv_pp22_rss_tbl_entry_get);
+
+/* The function allocates a RSS table for each physical rxq;
+ * rxqs of the same cos priority share a table (pointer is the rxq id
+ * masked to the cos width). No-op (returns 0) on non-PPv22 hardware.
+ * Returns -1 if any pointer write fails.
+ */
+int mv_pp22_rss_rxq_set(struct mv_pp2x_port *port, u32 cos_width)
+{
+ int rxq;
+ struct mv_pp22_rss_entry rss_entry;
+ int cos_mask = ((1 << cos_width) - 1);
+
+ if (port->priv->pp2_version != PPV22)
+ return 0;
+
+ memset(&rss_entry, 0, sizeof(struct mv_pp22_rss_entry));
+
+ rss_entry.sel = MVPP22_RSS_ACCESS_POINTER;
+
+ for (rxq = 0; rxq < port->num_rx_queues; rxq++) {
+ rss_entry.u.pointer.rxq_idx = port->rxqs[rxq]->id;
+ rss_entry.u.pointer.rss_tbl_ptr =
+ port->rxqs[rxq]->id & cos_mask;
+ if (mv_pp22_rss_tbl_entry_set(&port->priv->hw, &rss_entry))
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Set or clear the RSS-enable bit in the CLSC2_ATTR2 register of the
+ * C2 rule at the given TCAM index (read-modify-write).
+ */
+static void mv_pp22_c2_rss_attr_set(struct mv_pp2x_hw *hw, u32 index, bool en)
+{
+ int reg_val;
+
+ /* write index reg */
+ mv_pp2x_write(hw, MVPP2_CLS2_TCAM_IDX_REG, index);
+ /* Update rss_attr in reg CLSC2_ATTR2 */
+ reg_val = mv_pp2x_read(hw, MVPP2_CLS2_ACT_DUP_ATTR_REG);
+ if (en)
+ reg_val |= MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_MASK;
+ else
+ reg_val &= (~MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_MASK);
+
+ mv_pp2x_write(hw, MVPP2_CLS2_ACT_DUP_ATTR_REG, reg_val);
+}
+
+/* Toggle RSS on all of the port's C2 rules (VLAN_PRI, DSCP_PRI and
+ * DEFAULT), using the TCAM indices recorded in the C2 shadow.
+ */
+void mv_pp22_rss_c2_enable(struct mv_pp2x_port *port, bool en)
+{
+ int lkp_type;
+ int c2_index[MVPP2_CLS_LKP_MAX];
+ struct mv_pp2x_c2_rule_idx *rule_idx;
+
+ rule_idx = &port->priv->hw.c2_shadow->rule_idx_info[port->id];
+
+ /* Get the C2 index from shadow */
+ c2_index[MVPP2_CLS_LKP_VLAN_PRI] = rule_idx->vlan_pri_idx;
+ c2_index[MVPP2_CLS_LKP_DSCP_PRI] = rule_idx->dscp_pri_idx;
+ c2_index[MVPP2_CLS_LKP_DEFAULT] = rule_idx->default_rule_idx;
+
+ /* For lookup type of MVPP2_CLS_LKP_HASH,
+ * there is no corresponding C2 rule, so skip it.
+ * NOTE(review): only the three slots above are filled; this
+ * assumes every lkp_type in [VLAN_PRI, MAX) is one of them,
+ * otherwise c2_index[] is read uninitialized -- confirm the enum.
+ */
+ for (lkp_type = MVPP2_CLS_LKP_VLAN_PRI; lkp_type < MVPP2_CLS_LKP_MAX;
+ lkp_type++)
+ mv_pp22_c2_rss_attr_set(&port->priv->hw,
+ c2_index[lkp_type],
+ en);
+}
+
+/* Initialize Tx FIFO's */
+/* Program the per-port Tx FIFO size (val is masked to the field width) */
+void mv_pp2x_tx_fifo_size_set(struct mv_pp2x_hw *hw, u32 port_id, u32 val)
+{
+ mv_pp2x_write(hw,
+ MVPP22_TX_FIFO_SIZE_REG(port_id),
+ val & MVPP22_TX_FIFO_SIZE_MASK);
+}
+
+/* Program the per-port Tx FIFO threshold (val is masked to the field) */
+void mv_pp2x_tx_fifo_threshold_set(struct mv_pp2x_hw *hw, u32 port_id, u32 val)
+{
+ mv_pp2x_write(hw,
+ MVPP22_TX_FIFO_THRESH_REG(port_id),
+ val & MVPP22_TX_FIFO_THRESH_MASK);
+}
+
+/* Check number of buffers in BM pool: sum of the external (DRAM) and
+ * internal (BPPI) pointer counters, plus the one in-flight buffer the
+ * HW holds whenever the pool is non-empty.
+ */
+int mv_pp2x_check_hw_buf_num(struct mv_pp2x *priv, struct mv_pp2x_bm_pool *bm_pool)
+{
+ int total;
+
+ total = mv_pp2x_read(&priv->hw, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP22_BM_POOL_PTRS_NUM_MASK;
+ total += mv_pp2x_read(&priv->hw, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP2_BM_BPPI_PTR_NUM_MASK;
+
+ /* HW has one buffer ready and is not reflected in "external + internal" counters */
+ return total ? total + 1 : 0;
+}
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.h b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.h
new file mode 100644
index 000000000000..790ee39f4269
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw.h
@@ -0,0 +1,796 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#ifndef _MVPP2_HW_H_
+#define _MVPP2_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/printk.h>
+
+#include <linux/platform_device.h>
+
+/* Write a PPv2 register through the current CPU's address space.
+ * get_cpu()/put_cpu() pin the task so cpu_base[cpu] stays valid.
+ */
+static inline void mv_pp2x_write(struct mv_pp2x_hw *hw, u32 offset, u32 data)
+{
+ int cpu = get_cpu();
+ void *reg_ptr = hw->cpu_base[cpu] + offset;
+
+ writel(data, reg_ptr);
+ put_cpu();
+}
+
+/* Relaxed (non-ordered) register write; caller supplies the cpu and
+ * is responsible for staying on it.
+ */
+static inline void mv_pp2x_relaxed_write(struct mv_pp2x_hw *hw, u32 offset, u32 data,
+ int cpu)
+{
+ void *reg_ptr = hw->cpu_base[cpu] + offset;
+
+ writel_relaxed(data, reg_ptr);
+}
+
+/* Read a PPv2 register through the current CPU's address space */
+static inline u32 mv_pp2x_read(struct mv_pp2x_hw *hw, u32 offset)
+{
+ int cpu = get_cpu();
+ void *reg_ptr = hw->cpu_base[cpu] + offset;
+ u32 val;
+
+ val = readl(reg_ptr);
+ put_cpu();
+
+ return val;
+}
+
+/* Relaxed register read; caller supplies the cpu */
+static inline u32 mv_pp2x_relaxed_read(struct mv_pp2x_hw *hw, u32 offset, int cpu)
+{
+ void *reg_ptr = hw->cpu_base[cpu] + offset;
+ u32 val;
+
+ val = readl_relaxed(reg_ptr);
+ return val;
+}
+
+/* Write a register in an explicit SW thread's address space
+ * (base + sw_thread * MVPP2_ADDR_SPACE_SIZE).
+ */
+static inline void mv_pp22_thread_write(struct mv_pp2x_hw *hw, u32 sw_thread,
+ u32 offset, u32 data)
+{
+ writel(data, hw->base + sw_thread * MVPP2_ADDR_SPACE_SIZE + offset);
+}
+
+/* Read a register in an explicit SW thread's address space */
+static inline u32 mv_pp22_thread_read(struct mv_pp2x_hw *hw, u32 sw_thread,
+ u32 offset)
+{
+ return readl(hw->base + sw_thread * MVPP2_ADDR_SPACE_SIZE + offset);
+}
+
+/* Relaxed variant of mv_pp22_thread_write */
+static inline void mv_pp22_thread_relaxed_write(struct mv_pp2x_hw *hw,
+ u32 sw_thread,
+ u32 offset, u32 data)
+{
+ writel_relaxed(data, hw->base + sw_thread * MVPP2_ADDR_SPACE_SIZE + offset);
+}
+
+/* Relaxed variant of mv_pp22_thread_read */
+static inline u32 mv_pp22_thread_relaxed_read(struct mv_pp2x_hw *hw,
+ u32 sw_thread,
+ u32 offset)
+{
+ return readl_relaxed(hw->base + sw_thread * MVPP2_ADDR_SPACE_SIZE + offset);
+}
+
+/* PPv2.1: program the number of rx queues in the port's ISR group */
+static inline void mv_pp21_isr_rx_group_write(struct mv_pp2x_hw *hw, int port,
+ int num_rx_queues)
+{
+ mv_pp2x_write(hw, MVPP21_ISR_RXQ_GROUP_REG(port), num_rx_queues);
+}
+
+/* PPv2.2: select the (port, sub_group) ISR group via the index
+ * register, then program its starting queue and queue count.
+ */
+static inline void mv_pp22_isr_rx_group_write(struct mv_pp2x_hw *hw, int port,
+ int sub_group,
+ int start_queue,
+ int num_rx_queues)
+{
+ int val;
+
+ val = (port << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET) | sub_group;
+ mv_pp2x_write(hw, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
+ val = (num_rx_queues << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET) |
+ start_queue;
+ mv_pp2x_write(hw, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
+}
+
+/* Get number of physical egress port (egress ports follow the TCONTs) */
+static inline int mv_pp2x_egress_port(struct mv_pp2x_port *port)
+{
+ return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get number of physical TXQ from (port, per-port txq) */
+static inline int mv_pp2x_txq_phys(int port, int txq)
+{
+ return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline int mv_pp2x_rxq_received(struct mv_pp2x_port *port, int rxq_id)
+{
+ u32 val = mv_pp2x_read(&port->priv->hw, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+ return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Get number of free (non-occupied) Rx descriptors in the queue.
+ * (Original comment was a copy-paste of the "occupied" helper's.)
+ */
+static inline int mv_pp2x_rxq_free(struct mv_pp2x_port *port, int rxq_id)
+{
+ u32 val = mv_pp2x_read(&port->priv->hw, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+ return ((val & MVPP2_RXQ_NON_OCCUPIED_MASK) >>
+ MVPP2_RXQ_NON_OCCUPIED_OFFSET);
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline void mv_pp2x_rxq_status_update(struct mv_pp2x_port *port,
+ int rxq_id,
+ int used_count,
+ int free_count)
+{
+ /* Decrement the number of used descriptors and
+ * increment the number of free descriptors.
+ */
+ u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+ mv_pp2x_write(&port->priv->hw,
+ MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW;
+ * advances next_desc_to_proc (wrapping via MVPP2_QUEUE_NEXT_DESC).
+ */
+static inline struct mv_pp2x_rx_desc *
+mv_pp2x_rxq_next_desc_get(struct mv_pp2x_rx_queue *rxq)
+{
+ int rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
+ /* For uneven descriptors, fetch next two descriptors (2*32B) */
+ if (rx_desc & 0x1)
+ prefetch(rxq->first_desc + rxq->next_desc_to_proc);
+ return (rxq->first_desc + rx_desc);
+}
+
+/* Mask the current CPU's Rx/Tx interrupts (callable via on_each_cpu,
+ * hence the void *arg signature).
+ */
+static inline void mv_pp2x_interrupts_mask(void *arg)
+{
+ struct mv_pp2x_port *port = arg;
+
+ mv_pp2x_write(&port->priv->hw, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts; Tx-done cause is only
+ * enabled when the platform data requests interrupt-driven tx-done.
+ */
+static inline void mv_pp2x_interrupts_unmask(void *arg)
+{
+ struct mv_pp2x_port *port = arg;
+ u32 val;
+
+ val = MVPP2_CAUSE_MISC_SUM_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+ if (port->priv->pp2xdata->interrupt_tx_done)
+ val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+
+ mv_pp2x_write(&port->priv->hw,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+}
+
+/* Mask the shared SW threads' Rx interrupts (PPv2.2 multi address
+ * space only; a no-op otherwise).
+ */
+static inline void mv_pp2x_shared_thread_interrupts_mask(
+ struct mv_pp2x_port *port)
+{
+ struct queue_vector *q_vec = &port->q_vector[0];
+ int i;
+
+ if (!port->priv->pp2xdata->multi_addr_space)
+ return;
+
+ for (i = 0; i < port->num_qvector; i++) {
+ if (q_vec[i].qv_type == MVPP2_SHARED)
+ mv_pp22_thread_write(&port->priv->hw,
+ q_vec[i].sw_thread_id,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id),
+ 0);
+ }
+}
+
+/* Unmask the shared CPU's Rx interrupts */
+static inline void mv_pp2x_shared_thread_interrupts_unmask(
+ struct mv_pp2x_port *port)
+{
+ struct queue_vector *q_vec = &port->q_vector[0];
+ int i;
+
+ if (!port->priv->pp2xdata->multi_addr_space)
+ return;
+
+ for (i = 0; i < port->num_qvector; i++) {
+ if (q_vec[i].qv_type == MVPP2_SHARED)
+ mv_pp22_thread_write(&port->priv->hw,
+ q_vec[i].sw_thread_id,
+ MVPP2_ISR_RX_TX_MASK_REG(port->id),
+ MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);
+ }
+}
+
+/* Map an interrupt cause bitmap to an RX queue: fls() - 1 selects the
+ * highest-numbered queue whose cause bit is set. @cause must be
+ * non-zero (fls(0) - 1 would index out of range).
+ */
+static inline struct mv_pp2x_rx_queue *mv_pp2x_get_rx_queue(
+ struct mv_pp2x_port *port, u32 cause)
+{
+ int rx_queue = fls(cause) - 1;
+
+ return port->rxqs[rx_queue];
+}
+
+/* Map an interrupt cause bitmap to a TX queue: fls() - 1 selects the
+ * highest-numbered queue whose cause bit is set. @cause must be
+ * non-zero.
+ */
+static inline struct mv_pp2x_tx_queue *mv_pp2x_get_tx_queue(
+ struct mv_pp2x_port *port, u32 cause)
+{
+ int tx_queue = fls(cause) - 1;
+
+ return port->txqs[tx_queue];
+}
+
+/* Read a buffer's physical address from BM @pool via the "phys alloc"
+ * register. With 64-bit phys addresses the upper bits come from the
+ * separate high-allocate register and are merged in above bit 32
+ * (PPv22 addresses are 40-bit).
+ */
+static inline dma_addr_t mv_pp2x_bm_phys_addr_get(struct mv_pp2x_hw *hw, u32 pool)
+{
+ dma_addr_t val;
+
+ val = mv_pp2x_read(hw, MVPP2_BM_PHY_ALLOC_REG(pool));
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ {
+ u64 val2;
+
+ val2 = mv_pp2x_read(hw, MVPP22_BM_PHY_VIRT_HIGH_ALLOC_REG);
+ val2 &= MVPP22_BM_PHY_HIGH_ALLOC_MASK;
+ val |= (val2 << 32);
+ }
+#endif
+
+ return val;
+}
+
+/* Program a BM pool in HW: set the pool's descriptor base address
+ * (low 32 bits, plus the high bits on 64-bit DMA configs) and size,
+ * then start the pool via its control register.
+ */
+static inline void mv_pp2x_bm_hw_pool_create(struct mv_pp2x_hw *hw,
+ u32 pool,
+ dma_addr_t pool_addr,
+ int size)
+{
+ u32 val;
+
+ mv_pp2x_write(hw, MVPP2_BM_POOL_BASE_ADDR_REG(pool),
+ lower_32_bits(pool_addr));
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ mv_pp2x_write(hw, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG,
+ (upper_32_bits(pool_addr) & MVPP22_ADDR_HIGH_MASK));
+#endif
+ mv_pp2x_write(hw, MVPP2_BM_POOL_SIZE_REG(pool), size);
+
+ val = mv_pp2x_read(hw, MVPP2_BM_POOL_CTRL_REG(pool));
+ val |= MVPP2_BM_START_MASK;
+ mv_pp2x_write(hw, MVPP2_BM_POOL_CTRL_REG(pool), val);
+}
+
+/* Release a buffer to BM @pool together with its virtual-address
+ * cookie. The virtual address is written first; writing the physical
+ * release register appears to commit the release — TODO confirm
+ * ordering requirement against the HW spec.
+ */
+static inline void mv_pp2x_bm_pool_put_virtual(struct mv_pp2x_hw *hw, u32 pool,
+ dma_addr_t buf_phys_addr,
+ u8 *buf_virt_addr, int cpu)
+{
+ mv_pp2x_relaxed_write(hw, MVPP2_BM_VIRT_RLS_REG,
+ lower_32_bits((uintptr_t)buf_virt_addr), cpu);
+
+ mv_pp2x_relaxed_write(hw, MVPP2_BM_PHY_RLS_REG(pool),
+ lower_32_bits(buf_phys_addr), cpu);
+}
+
+/* Release buffer to BM. On 64-bit DMA configs the upper physical
+ * address bits are written to the high-release register before the
+ * low 32 bits are written to the pool's release register.
+ */
+static inline void mv_pp2x_bm_pool_put(struct mv_pp2x_hw *hw, u32 pool,
+ dma_addr_t buf_phys_addr, int cpu)
+{
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ mv_pp2x_relaxed_write(hw, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG,
+ upper_32_bits(buf_phys_addr), cpu);
+#endif
+
+ mv_pp2x_relaxed_write(hw, MVPP2_BM_PHY_RLS_REG(pool),
+ lower_32_bits(buf_phys_addr), cpu);
+}
+
+/* Release multicast buffer: program the MC id into the PPv2.1 MC
+ * release register, then release the buffer with the MC flag set in
+ * its physical address. Note: buf_virt_addr is currently unused here.
+ */
+static inline void mv_pp2x_bm_pool_mc_put(struct mv_pp2x_port *port, int pool,
+ u32 buf_phys_addr,
+ u32 buf_virt_addr,
+ int mc_id, int cpu)
+{
+ u32 val = 0;
+
+ val |= (mc_id & MVPP21_BM_MC_ID_MASK);
+ mv_pp2x_write(&port->priv->hw, MVPP21_BM_MC_RLS_REG, val);
+ /*TODO : YuvalC, this is just workaround to compile.
+ * Need to handle mv_pp2x_buff_hdr_rx().
+ */
+ mv_pp2x_bm_pool_put(&port->priv->hw, pool,
+ (dma_addr_t)(buf_phys_addr |
+ MVPP2_BM_PHY_RLS_MC_BUFF_MASK), cpu);
+}
+
+/* Enable the port's interrupts for every SW thread used by any of its
+ * queue vectors (OR of all per-vector sw_thread_mask values).
+ */
+static inline void mv_pp2x_port_interrupts_enable(struct mv_pp2x_port *port)
+{
+ int sw_thread_mask = 0, i;
+ struct queue_vector *q_vec = &port->q_vector[0];
+
+ for (i = 0; i < port->num_qvector; i++)
+ sw_thread_mask |= q_vec[i].sw_thread_mask;
+ mv_pp2x_write(&port->priv->hw, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
+}
+
+/* Disable the port's interrupts for every SW thread used by any of
+ * its queue vectors (counterpart of mv_pp2x_port_interrupts_enable).
+ */
+static inline void mv_pp2x_port_interrupts_disable(struct mv_pp2x_port *port)
+{
+ int sw_thread_mask = 0, i;
+ struct queue_vector *q_vec = &port->q_vector[0];
+
+ for (i = 0; i < port->num_qvector; i++)
+ sw_thread_mask |= q_vec[i].sw_thread_mask;
+
+ mv_pp2x_write(&port->priv->hw, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
+}
+
+/* Enable interrupts for a single queue vector's SW threads only. */
+static inline void mv_pp2x_qvector_interrupt_enable(struct queue_vector *q_vec)
+{
+ struct mv_pp2x_port *port = q_vec->parent;
+
+ mv_pp2x_write(&port->priv->hw, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(q_vec->sw_thread_mask));
+}
+
+/* Disable interrupts for a single queue vector's SW threads only. */
+static inline void mv_pp2x_qvector_interrupt_disable(struct queue_vector *q_vec)
+{
+ struct mv_pp2x_port *port = q_vec->parent;
+
+ mv_pp2x_write(&port->priv->hw, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(q_vec->sw_thread_mask));
+}
+
+/* Return the enabled-interrupt bits of this queue vector: reads the
+ * port's ISR enable register and masks it down to the vector's SW
+ * threads. Non-zero means the vector's interrupts are enabled.
+ */
+static inline u32 mv_pp2x_qvector_interrupt_state_get(struct queue_vector
+ *q_vec)
+{
+ struct mv_pp2x_port *port = q_vec->parent;
+ u32 state;
+
+ state = mv_pp2x_read(&port->priv->hw, MVPP2_ISR_ENABLE_REG(port->id));
+ state &= MVPP2_ISR_ENABLE_INTERRUPT(q_vec->sw_thread_mask);
+ return state;
+}
+
+/* Return the number of descriptors transmitted on @txq_id since the
+ * last call; reading the TXQ_SENT status register clears the HW
+ * counter. PPv2.1 has a single register address space, so sw_thread
+ * is forced to 0 there; PPv2.2 reads via the caller's SW thread.
+ */
+static inline int mv_pp2x_txq_sent_desc_proc(struct mv_pp2x_port *port,
+ int sw_thread,
+ u8 txq_id)
+{
+ u32 val;
+
+ /* Reading status reg resets transmitted descriptor counter */
+ if (port->priv->pp2_version == PPV21) {
+ sw_thread = 0;
+ val = mv_pp22_thread_relaxed_read(&port->priv->hw,
+ sw_thread,
+ MVPP21_TXQ_SENT_REG(txq_id));
+ return (val & MVPP21_TRANSMITTED_COUNT_MASK) >>
+ MVPP21_TRANSMITTED_COUNT_OFFSET;
+ } else {
+ val = mv_pp22_thread_relaxed_read(&port->priv->hw,
+ sw_thread,
+ MVPP22_TXQ_SENT_REG(txq_id));
+ return (val & MVPP22_TRANSMITTED_COUNT_MASK) >>
+ MVPP22_TRANSMITTED_COUNT_OFFSET;
+ }
+}
+
+/* Step the TX queue's next-descriptor index back by one, wrapping
+ * from 0 to last_desc - 1 (undoes one descriptor "get").
+ */
+static inline void mv_pp2x_txq_desc_put(struct mv_pp2x_tx_queue *txq)
+{
+ if (txq->next_desc_to_proc == 0)
+ txq->next_desc_to_proc = txq->last_desc - 1;
+ else
+ txq->next_desc_to_proc--;
+}
+
+/* Clear the HW transmitted-descriptor counters of every TX queue of
+ * the port by reading each queue's TXQ_SENT register (the read
+ * resets the counter; the value is discarded). void *arg allows use
+ * as a cross-CPU callback.
+ */
+static inline void mv_pp2x_txq_sent_counter_clear(void *arg)
+{
+ struct mv_pp2x_port *port = arg;
+ int queue;
+
+ for (queue = 0; queue < port->num_tx_queues; queue++) {
+ int id = port->txqs[queue]->id;
+
+ if (port->priv->pp2_version == PPV21)
+ mv_pp2x_read(&port->priv->hw,
+ MVPP21_TXQ_SENT_REG(id));
+ else
+ mv_pp2x_read(&port->priv->hw,
+ MVPP22_TXQ_SENT_REG(id));
+ }
+}
+
+/* PPv2.1: return the RX descriptor's buffer cookie as a pointer. */
+static inline u8 *mv_pp21_rxdesc_cookie_get(
+ struct mv_pp2x_rx_desc *rx_desc)
+{
+ return((u8 *)((uintptr_t)rx_desc->u.pp21.buf_cookie));
+}
+
+/* PPv2.1: return the RX descriptor's buffer physical address. */
+static inline dma_addr_t mv_pp21_rxdesc_phys_addr_get(
+ struct mv_pp2x_rx_desc *rx_desc)
+{
+ return (dma_addr_t)rx_desc->u.pp21.buf_phys_addr;
+}
+
+/* The pp22 descriptor accessors below mask cookie/address fields down
+ * to 40 bits so they behave the same on aarch64 and aarch32.
+ */
+/* PPv2.2: return the RX descriptor's buffer cookie (low 40 bits of
+ * the combined cookie/BM-qset/cls-info field) as a pointer.
+ */
+static inline u8 *mv_pp22_rxdesc_cookie_get(
+ struct mv_pp2x_rx_desc *rx_desc)
+{
+ return((u8 *)((uintptr_t)
+ (rx_desc->u.pp22.buf_cookie_bm_qset_cls_info &
+ DMA_BIT_MASK(40))));
+}
+
+/* PPv2.2: return the RX descriptor's buffer physical address (low 40
+ * bits of the combined phys-addr/key-hash field).
+ */
+static inline dma_addr_t mv_pp22_rxdesc_phys_addr_get(
+ struct mv_pp2x_rx_desc *rx_desc)
+{
+ return((dma_addr_t)
+ (rx_desc->u.pp22.buf_phys_addr_key_hash &
+ DMA_BIT_MASK(40)));
+}
+
+/* PPv2.1: return the TX descriptor's cookie as the skb pointer. */
+static inline struct sk_buff *mv_pp21_txdesc_cookie_get(
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ return((struct sk_buff *)((uintptr_t)tx_desc->u.pp21.buf_cookie));
+}
+
+/* PPv2.1: return the TX descriptor's buffer physical address. */
+static inline dma_addr_t mv_pp21_txdesc_phys_addr_get(
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ return (dma_addr_t)tx_desc->u.pp21.buf_phys_addr;
+}
+
+/* PPv2.2: return the TX descriptor's cookie (low 40 bits of the
+ * combined cookie/BM-qset/hw-cmd3 field) as the skb pointer.
+ */
+static inline struct sk_buff *mv_pp22_txdesc_cookie_get(
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ return((struct sk_buff *)((uintptr_t)
+ (tx_desc->u.pp22.buf_cookie_bm_qset_hw_cmd3 &
+ DMA_BIT_MASK(40))));
+}
+
+/* PPv2.2: return the TX descriptor's buffer physical address (low 40
+ * bits of the combined phys-addr/hw-cmd2 field).
+ */
+static inline dma_addr_t mv_pp22_txdesc_phys_addr_get(
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ return((dma_addr_t)
+ (tx_desc->u.pp22.buf_phys_addr_hw_cmd2 & DMA_BIT_MASK(40)));
+}
+
+/* Version dispatcher: read the TX descriptor's buffer physical
+ * address using the PPv2.1 or PPv2.2 descriptor layout.
+ */
+static inline dma_addr_t mv_pp2x_txdesc_phys_addr_get(
+ enum mvppv2_version pp2_ver, struct mv_pp2x_tx_desc *tx_desc)
+{
+ if (pp2_ver == PPV21)
+ return mv_pp21_txdesc_phys_addr_get(tx_desc);
+
+ return mv_pp22_txdesc_phys_addr_get(tx_desc);
+}
+
+/* PPv2.1: store the buffer physical address in the TX descriptor. */
+static inline void mv_pp21_txdesc_phys_addr_set(dma_addr_t phys_addr,
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ tx_desc->u.pp21.buf_phys_addr = phys_addr;
+}
+
+/* PPv2.2: store the buffer physical address in the low 40 bits of the
+ * combined phys-addr/hw-cmd2 field, preserving the upper bits. On
+ * 32-bit dma_addr_t configs the address is written directly and the
+ * 5th byte (bits 32-39) is explicitly zeroed.
+ */
+static inline void mv_pp22_txdesc_phys_addr_set(dma_addr_t phys_addr,
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ u64 *buf_phys_addr_p = &tx_desc->u.pp22.buf_phys_addr_hw_cmd2;
+
+#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ *buf_phys_addr_p &= ~(DMA_BIT_MASK(40));
+ *buf_phys_addr_p |= phys_addr & DMA_BIT_MASK(40);
+#else
+ *((dma_addr_t *)buf_phys_addr_p) = phys_addr;
+ *((u8 *)buf_phys_addr_p + sizeof(dma_addr_t)) = 0; /*5th byte*/
+#endif
+}
+
+/* Version dispatcher: store the buffer physical address using the
+ * PPv2.1 or PPv2.2 descriptor layout.
+ */
+static inline void mv_pp2x_txdesc_phys_addr_set(enum mvppv2_version pp2_ver,
+ dma_addr_t phys_addr, struct mv_pp2x_tx_desc *tx_desc)
+{
+ if (pp2_ver == PPV21)
+ mv_pp21_txdesc_phys_addr_set(phys_addr, tx_desc);
+ else
+ mv_pp22_txdesc_phys_addr_set(phys_addr, tx_desc);
+}
+
+int mv_pp2x_ptr_validate(const void *ptr);
+int mv_pp2x_range_validate(int value, int min, int max);
+
+int mv_pp2x_prs_hw_read(struct mv_pp2x_hw *hw, struct mv_pp2x_prs_entry *pe);
+
+int mv_pp2x_prs_default_init(struct platform_device *pdev,
+ struct mv_pp2x_hw *hw);
+void mv_pp2x_prs_mac_promisc_set(struct mv_pp2x_hw *hw, int port, bool add);
+void mv_pp2x_prs_mac_multi_set(struct mv_pp2x_hw *hw, int port, int index,
+ bool add);
+int mv_pp2x_prs_mac_da_accept(struct mv_pp2x_port *port,
+ const u8 *da, bool add);
+int mv_pp2x_prs_def_flow(struct mv_pp2x_port *port);
+int mv_pp2x_prs_flow_set(struct mv_pp2x_port *port);
+void mv_pp2x_prs_mac_entry_del(struct mv_pp2x_port *port,
+ enum mv_pp2x_l2_cast l2_cast,
+ enum mv_pp2x_mac_del_option op);
+int mv_pp2x_prs_tag_mode_set(struct mv_pp2x_hw *hw, int port, int type);
+int mv_pp2x_prs_update_mac_da(struct net_device *dev, const u8 *da);
+void mv_pp2x_prs_flow_id_attr_init(void);
+int mv_pp2x_prs_flow_id_attr_get(int flow_id);
+
+int mv_pp2x_cls_init(struct platform_device *pdev, struct mv_pp2x_hw *hw);
+void mv_pp2x_cls_port_config(struct mv_pp2x_port *port);
+void mv_pp2x_cls_config(struct mv_pp2x_hw *hw);
+void mv_pp2x_cls_oversize_rxq_set(struct mv_pp2x_port *port);
+void mv_pp2x_cls_lookup_read(struct mv_pp2x_hw *hw, int lkpid, int way,
+ struct mv_pp2x_cls_lookup_entry *le);
+void mv_pp2x_cls_flow_tbl_temp_copy(struct mv_pp2x_hw *hw, int lkpid,
+ int *temp_flow_idx);
+void mv_pp2x_cls_lkp_flow_set(struct mv_pp2x_hw *hw, int lkpid, int way,
+ int flow_idx);
+void mv_pp2x_cls_flow_port_add(struct mv_pp2x_hw *hw, int index, int port_id);
+void mv_pp2x_cls_flow_port_del(struct mv_pp2x_hw *hw, int index, int port_id);
+
+void mv_pp2x_txp_max_tx_size_set(struct mv_pp2x_port *port);
+void mv_pp2x_tx_done_time_coal_set(struct mv_pp2x_port *port, u32 usec);
+void mv_pp21_gmac_max_rx_size_set(struct mv_pp2x_port *port);
+
+int mv_pp2x_txq_pend_desc_num_get(struct mv_pp2x_port *port,
+ struct mv_pp2x_tx_queue *txq);
+u32 mv_pp2x_txq_desc_csum(int l3_offs, int l3_proto,
+ int ip_hdr_len, int l4_proto);
+struct mv_pp2x_tx_desc *mv_pp2x_txq_next_desc_get(
+ struct mv_pp2x_aggr_tx_queue *aggr_txq);
+struct mv_pp2x_tx_desc *mv_pp2x_txq_prev_desc_get(
+ struct mv_pp2x_aggr_tx_queue *aggr_txq);
+int mv_pp2x_txq_alloc_reserved_desc(struct mv_pp2x *priv,
+ struct mv_pp2x_tx_queue *txq,
+ int num, int cpu);
+void mv_pp2x_aggr_txq_pend_desc_add(struct mv_pp2x_port *port, int pending);
+int mv_pp2x_aggr_desc_num_read(struct mv_pp2x *priv, int cpu);
+int mv_pp2x_aggr_desc_num_check(struct mv_pp2x *priv,
+ struct mv_pp2x_aggr_tx_queue *aggr_txq,
+ int num, int cpu);
+void mv_pp2x_rxq_offset_set(struct mv_pp2x_port *port,
+ int prxq, int offset);
+void mv_pp2x_bm_pool_bufsize_set(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_bm_pool *bm_pool,
+ int buf_size);
+void mv_pp2x_pool_refill(struct mv_pp2x *priv, u32 pool,
+ dma_addr_t phys_addr, int cpu);
+
+void mv_pp2x_pool_refill_virtual(struct mv_pp2x *priv, u32 pool,
+ dma_addr_t phys_addr, u8 *cookie);
+void mv_pp21_rxq_long_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int long_pool);
+void mv_pp21_rxq_short_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int short_pool);
+
+void mv_pp22_rxq_long_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int long_pool);
+void mv_pp22_rxq_short_pool_set(struct mv_pp2x_hw *hw,
+ int prxq, int short_pool);
+
+void mv_pp21_port_mii_set(struct mv_pp2x_port *port);
+void mv_pp21_port_fc_adv_enable(struct mv_pp2x_port *port);
+void mv_pp21_port_enable(struct mv_pp2x_port *port);
+void mv_pp21_port_disable(struct mv_pp2x_port *port);
+
+void mv_pp2x_ingress_enable(struct mv_pp2x_port *port);
+void mv_pp2x_ingress_disable(struct mv_pp2x_port *port);
+void mv_pp2x_egress_enable(struct mv_pp2x_port *port);
+void mv_pp2x_egress_disable(struct mv_pp2x_port *port);
+
+void mv_pp21_port_periodic_xon_disable(struct mv_pp2x_port *port);
+void mv_pp21_port_loopback_set(struct mv_pp2x_port *port);
+void mv_pp21_port_reset(struct mv_pp2x_port *port);
+
+void mv_pp2x_rx_pkts_coal_set(struct mv_pp2x_port *port,
+ struct mv_pp2x_rx_queue *rxq);
+void mv_pp2x_rx_time_coal_set(struct mv_pp2x_port *port,
+ struct mv_pp2x_rx_queue *rxq);
+void mv_pp2x_tx_done_pkts_coal_set(void *arg);
+void mv_pp2x_cause_error(struct net_device *dev, int cause);
+void mv_pp2x_rx_error(struct mv_pp2x_port *port,
+ struct mv_pp2x_rx_desc *rx_desc);
+void mv_pp2x_rx_csum(struct mv_pp2x_port *port, u32 status,
+ struct sk_buff *skb);
+void mv_pp21_get_mac_address(struct mv_pp2x_port *port, unsigned char *addr);
+
+int mv_pp2x_c2_init(struct platform_device *pdev, struct mv_pp2x_hw *hw);
+
+int mv_pp2x_prs_sw_sram_shift_set(struct mv_pp2x_prs_entry *pe, int shift,
+ unsigned int op);
+int mv_pp2x_prs_sw_sram_shift_get(struct mv_pp2x_prs_entry *pe, int *shift);
+int mv_pp2x_prs_sw_sram_next_lu_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *lu);
+int mv_pp2x_prs_sram_bit_get(struct mv_pp2x_prs_entry *pe, int bit_num,
+ unsigned int *bit);
+int mv_pp2x_prs_sw_sram_lu_done_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *bit);
+int mv_pp2x_prs_sw_sram_flowid_gen_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *bit);
+int mv_pp2x_prs_sw_sram_ri_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *bits,
+ unsigned int *enable);
+int mv_pp2x_prs_sw_sram_ai_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *bits,
+ unsigned int *enable);
+int mv_pp2x_prs_sw_sram_offset_set(struct mv_pp2x_prs_entry *pe,
+ unsigned int type,
+ int offset, unsigned int op);
+int mv_pp2x_prs_sw_sram_offset_get(struct mv_pp2x_prs_entry *pe,
+ unsigned int *type,
+ int *offset, unsigned int *op);
+void mv_pp2x_prs_hw_port_init(struct mv_pp2x_hw *hw, int port,
+ int lu_first, int lu_max, int offset);
+void mv_pp2x_prs_sw_clear(struct mv_pp2x_prs_entry *pe);
+void mv_pp2x_prs_hw_inv(struct mv_pp2x_hw *hw, int index);
+void mv_pp2x_prs_tcam_lu_set(struct mv_pp2x_prs_entry *pe, unsigned int lu);
+void mv_pp2x_prs_tcam_port_set(struct mv_pp2x_prs_entry *pe,
+ unsigned int port, bool add);
+void mv_pp2x_prs_tcam_port_map_set(struct mv_pp2x_prs_entry *pe,
+ unsigned int ports);
+void mv_pp2x_prs_tcam_data_byte_set(struct mv_pp2x_prs_entry *pe,
+ unsigned int offs,
+ unsigned char byte,
+ unsigned char enable);
+void mv_pp2x_prs_tcam_ai_update(struct mv_pp2x_prs_entry *pe,
+ unsigned int bits,
+ unsigned int enable);
+void mv_pp2x_prs_sram_ri_update(struct mv_pp2x_prs_entry *pe,
+ unsigned int bits, unsigned int mask);
+void mv_pp2x_prs_sram_ai_update(struct mv_pp2x_prs_entry *pe,
+ unsigned int bits, unsigned int mask);
+void mv_pp2x_prs_sram_next_lu_set(struct mv_pp2x_prs_entry *pe,
+ unsigned int lu);
+void mv_pp2x_prs_sw_sram_lu_done_set(struct mv_pp2x_prs_entry *pe);
+void mv_pp2x_prs_sw_sram_lu_done_clear(struct mv_pp2x_prs_entry *pe);
+void mv_pp2x_prs_sw_sram_flowid_set(struct mv_pp2x_prs_entry *pe);
+void mv_pp2x_prs_sw_sram_flowid_clear(struct mv_pp2x_prs_entry *pe);
+int mv_pp2x_prs_hw_write(struct mv_pp2x_hw *hw, struct mv_pp2x_prs_entry *pe);
+int mv_pp2x_cls_hw_lkp_read(struct mv_pp2x_hw *hw, int lkpid, int way,
+ struct mv_pp2x_cls_lookup_entry *fe);
+int mv_pp2x_cls_hw_lkp_write(struct mv_pp2x_hw *hw, int lkpid, int way,
+ struct mv_pp2x_cls_lookup_entry *fe);
+int mv_pp2x_cls_lkp_port_way_set(struct mv_pp2x_hw *hw, int port, int way);
+int mv_pp2x_cls_sw_lkp_rxq_get(struct mv_pp2x_cls_lookup_entry *lkp, int *rxq);
+int mv_pp2x_cls_sw_lkp_rxq_set(struct mv_pp2x_cls_lookup_entry *fe, int rxq);
+int mv_pp2x_cls_sw_lkp_en_get(struct mv_pp2x_cls_lookup_entry *lkp, int *en);
+int mv_pp2x_cls_sw_lkp_en_set(struct mv_pp2x_cls_lookup_entry *lkp, int en);
+int mv_pp2x_cls_sw_lkp_flow_get(struct mv_pp2x_cls_lookup_entry *lkp,
+ int *flow_idx);
+int mv_pp2x_cls_sw_lkp_flow_set(struct mv_pp2x_cls_lookup_entry *lkp,
+ int flow_idx);
+int mv_pp2x_cls_sw_lkp_mod_get(struct mv_pp2x_cls_lookup_entry *lkp,
+ int *mod_base);
+int mv_pp2x_cls_sw_lkp_mod_set(struct mv_pp2x_cls_lookup_entry *lkp,
+ int mod_base);
+int mv_pp2x_cls_hw_flow_read(struct mv_pp2x_hw *hw, int index,
+ struct mv_pp2x_cls_flow_entry *fe);
+int mv_pp2x_cls_sw_flow_hek_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *num_of_fields, int field_ids[]);
+int mv_pp2x_cls_sw_flow_port_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *type, int *portid);
+
+int mv_pp2x_cls_hw_lkp_hit_get(struct mv_pp2x_hw *hw, int lkpid, int way,
+ unsigned int *cnt);
+void mv_pp2x_cls_flow_write(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_cls_flow_entry *fe);
+int mv_pp2x_cls_sw_flow_port_set(struct mv_pp2x_cls_flow_entry *fe,
+ int type, int portid);
+int mv_pp2x_cls_sw_flow_hek_num_set(struct mv_pp2x_cls_flow_entry *fe,
+ int num_of_fields);
+int mv_pp2x_cls_sw_flow_hek_set(struct mv_pp2x_cls_flow_entry *fe,
+ int field_index, int field_id);
+int mv_pp2x_cls_sw_flow_portid_select(struct mv_pp2x_cls_flow_entry *fe,
+ int from);
+int mv_pp2x_cls_sw_flow_pppoe_set(struct mv_pp2x_cls_flow_entry *fe, int mode);
+int mv_pp2x_cls_sw_flow_vlan_set(struct mv_pp2x_cls_flow_entry *fe, int mode);
+int mv_pp2x_cls_sw_flow_macme_set(struct mv_pp2x_cls_flow_entry *fe, int mode);
+int mv_pp2x_cls_sw_flow_udf7_set(struct mv_pp2x_cls_flow_entry *fe, int mode);
+int mv_pp2x_cls_sw_flow_seq_ctrl_set(struct mv_pp2x_cls_flow_entry *fe,
+ int mode);
+int mv_pp2x_cls_sw_flow_engine_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *engine, int *is_last);
+int mv_pp2x_cls_sw_flow_engine_set(struct mv_pp2x_cls_flow_entry *fe,
+ int engine, int is_last);
+int mv_pp2x_cls_sw_flow_extra_get(struct mv_pp2x_cls_flow_entry *fe,
+ int *type, int *prio);
+int mv_pp2x_cls_sw_flow_extra_set(struct mv_pp2x_cls_flow_entry *fe,
+ int type, int prio);
+int mv_pp2x_cls_hw_flow_hit_get(struct mv_pp2x_hw *hw,
+ int index, unsigned int *cnt);
+int mv_pp2x_cls_hw_udf_set(struct mv_pp2x_hw *hw, int udf_no, int offs_id,
+ int offs_bits, int size_bits);
+int mv_pp2x_cls_c2_qos_hw_read(struct mv_pp2x_hw *hw, int tbl_id, int tbl_sel,
+ int tbl_line,
+ struct mv_pp2x_cls_c2_qos_entry *qos);
+int mv_pp2x_cls_c2_qos_hw_write(struct mv_pp2x_hw *hw,
+ struct mv_pp2x_cls_c2_qos_entry *qos);
+int mv_pp2_cls_c2_qos_prio_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *prio);
+int mv_pp2_cls_c2_qos_dscp_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *dscp);
+int mv_pp2_cls_c2_qos_color_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *color);
+int mv_pp2_cls_c2_qos_gpid_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *gpid);
+int mv_pp2_cls_c2_qos_queue_get(struct mv_pp2x_cls_c2_qos_entry *qos, int *queue);
+int mv_pp2x_cls_c2_qos_tbl_set(struct mv_pp2x_cls_c2_entry *c2, int tbl_id,
+ int tbl_sel);
+int mv_pp2x_cls_c2_hw_write(struct mv_pp2x_hw *hw, int index,
+ struct mv_pp2x_cls_c2_entry *c2);
+int mv_pp2x_cls_c2_hw_read(struct mv_pp2x_hw *hw, int index,
+ struct mv_pp2x_cls_c2_entry *c2);
+int mv_pp2x_cls_c2_hit_cntr_clear_all(struct mv_pp2x_hw *hw);
+int mv_pp2x_cls_c2_hit_cntr_read(struct mv_pp2x_hw *hw, int index, u32 *cntr);
+int mv_pp2x_cls_c2_rule_set(struct mv_pp2x_port *port, u8 start_queue);
+u8 mv_pp2x_cls_c2_rule_queue_get(struct mv_pp2x_hw *hw, u32 rule_idx);
+void mv_pp2x_cls_c2_rule_queue_set(struct mv_pp2x_hw *hw, u32 rule_idx,
+ u8 queue);
+u8 mv_pp2x_cls_c2_pbit_tbl_queue_get(struct mv_pp2x_hw *hw, u8 tbl_id,
+ u8 tbl_line);
+void mv_pp2x_cls_c2_pbit_tbl_queue_set(struct mv_pp2x_hw *hw, u8 tbl_id,
+ u8 tbl_line, u8 queue);
+int mv_pp2x_cls_c2_hw_inv(struct mv_pp2x_hw *hw, int index);
+void mv_pp2x_cls_c2_hw_inv_all(struct mv_pp2x_hw *hw);
+int mv_pp2x_cls_c2_tcam_byte_set(struct mv_pp2x_cls_c2_entry *c2,
+ unsigned int offs,
+ unsigned char byte,
+ unsigned char enable);
+int mv_pp2x_cls_c2_qos_queue_set(struct mv_pp2x_cls_c2_qos_entry *qos,
+ u8 queue);
+int mv_pp2x_cls_c2_color_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int from);
+int mv_pp2x_cls_c2_prio_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int prio, int from);
+int mv_pp2x_cls_c2_dscp_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int dscp, int from);
+int mv_pp2x_cls_c2_queue_low_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int queue, int from);
+int mv_pp2x_cls_c2_queue_high_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int queue, int from);
+int mv_pp2x_cls_c2_forward_set(struct mv_pp2x_cls_c2_entry *c2, int cmd);
+int mv_pp2x_cls_c2_rss_set(struct mv_pp2x_cls_c2_entry *c2, int cmd,
+ int rss_en);
+int mv_pp2x_cls_c2_flow_id_en(struct mv_pp2x_cls_c2_entry *c2,
+ int flowid_en);
+
+int mv_pp22_rss_tbl_entry_set(struct mv_pp2x_hw *hw,
+ struct mv_pp22_rss_entry *rss);
+int mv_pp22_rss_tbl_entry_get(struct mv_pp2x_hw *hw,
+ struct mv_pp22_rss_entry *rss);
+
+int mv_pp22_rss_rxq_set(struct mv_pp2x_port *port, u32 cos_width);
+
+void mv_pp22_rss_c2_enable(struct mv_pp2x_port *port, bool en);
+
+void mv_pp2x_tx_fifo_size_set(struct mv_pp2x_hw *hw, u32 port_id, u32 val);
+
+void mv_pp2x_tx_fifo_threshold_set(struct mv_pp2x_hw *hw, u32 port_id, u32 val);
+
+int mv_pp2x_check_hw_buf_num(struct mv_pp2x *priv, struct mv_pp2x_bm_pool *bm_pool);
+void mv_pp22_set_net_comp(struct mv_pp2x *priv);
+int mvcpn110_mac_hw_init(struct mv_pp2x_port *port);
+
+#endif /* _MVPP2_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw_type.h b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw_type.h
new file mode 100644
index 000000000000..4cb6087aa583
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_hw_type.h
@@ -0,0 +1,2847 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2016 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#ifndef _MVPP2_HW_TYPE_H_
+#define _MVPP2_HW_TYPE_H_
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#define CREATE_MASK(pos, len) GENMASK((pos) + (len) - 1, (pos))
+#define CREATE_MASK_ULL(pos, len) GENMASK_ULL((pos) + (len) - 1, (pos))
+
+#define AUTO_MASK(reg_name) CREATE_MASK(reg_name##_OFFS, reg_name##_SIZE)
+
+/*All PPV22 Addresses are 40-bit */
+#define MVPP22_ADDR_HIGH_SIZE 8
+#define MVPP22_ADDR_HIGH_MASK ((1 << MVPP22_ADDR_HIGH_SIZE) - 1)
+
+/*PPV22 ADDRESS SPACE */
+#define MVPP2_ADDR_SPACE_SIZE (64 * 1024)
+
+/*TODO*/
+/*AXI_BRIDGE*/
+/*AXI_CONTEXT*/
+/*Top Regfile*/
+
+#define MVPP21_DESC_ADDR_SHIFT 0 /*Applies to RXQ, AGGR_TXQ*/
+#define MVPP22_DESC_ADDR_SHIFT (9 - 1) /*Applies to RXQ, AGGR_TXQ*/
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
+#define MVPP2_RX_FIFO_INIT_REG 0x64
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
+#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
+#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
+#define MVPP2_POOL_BUF_SIZE_OFFSET 5
+
+/* RXQ_CONFIG_REF Generic+PPV21+PPV22 */
+#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
+#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
+#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
+#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
+#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
+#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+
+#define MVPP21_RXQ_POOL_SHORT_OFFS 20
+#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP21_RXQ_POOL_LONG_OFFS 24
+#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
+
+#define MVPP22_RXQ_POOL_SHORT_OFFS 20
+#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
+#define MVPP22_RXQ_POOL_LONG_OFFS 24
+#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
+#define MVPP22_RXQ_LLC_DEP_HDR_SIZE 0xf000
+#define MVPP22_RXQ_LLC_DEP_ENABLE BIT(16)
+
+#define MVPP21_ETH_RX_HWQ_REG(txq) (0xc00 + 4 * (txq))
+#define MVPP21_ETH_RX_HWQ_POOL_SHORT_OFFS 0
+#define MVPP21_ETH_RX_HWQ_POOL_SHORT_MASK 0x7
+#define MVPP21_ETH_RX_HWQ_POOL_LONG_OFFS 4
+#define MVPP21_ETH_RX_HWQ_POOL_LONG_MASK 0x70
+#define MVPP21_ETH_RX_HWQ_DISABLE_MASK BIT(31)
+#define MVPP22_ETH_RX_HWQ_REG(txq) (0xe00 + 4 * (txq))
+#define MVPP22_ETH_RX_HWQ_POOL_SHORT_OFFS 0
+#define MVPP22_ETH_RX_HWQ_POOL_SHORT_MASK 0xf
+#define MVPP22_ETH_RX_HWQ_POOL_LONG_OFFS 4
+#define MVPP22_ETH_RX_HWQ_POOL_LONG_MASK 0xf0
+#define MVPP22_ETH_RX_HWQ_DISABLE_MASK BIT(31)
+
+#define MVPP22_RX_HWF_SNOOP_REG (0x178)
+#define MVPP22_RX_HWF_SNOOP_ENABLE (BIT(0))
+
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_CACHE_SIZE 4
+#define MVPP22_AXI_ATTR_CACHE_MASK AUTO_MASK(MVPP22_AXI_ATTR_CACHE)
+
+#define MVPP22_AXI_ATTR_QOS_OFFS 4
+#define MVPP22_AXI_ATTR_QOS_SIZE 4
+#define MVPP22_AXI_ATTR_QOS_MASK AUTO_MASK(MVPP22_AXI_ATTR_QOS)
+
+#define MVPP22_AXI_ATTR_TC_OFFS 8
+#define MVPP22_AXI_ATTR_TC_SIZE 4
+#define MVPP22_AXI_ATTR_TC_MASK AUTO_MASK(MVPP22_AXI_ATTR_TC)
+
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+#define MVPP22_AXI_ATTR_DOMAIN_SIZE 2
+#define MVPP22_AXI_ATTR_DOMAIN_MASK AUTO_MASK(MVPP22_AXI_ATTR_DOMAIN)
+
+#define MVPP22_AXI_ATTR_NON_CACHE ((0x3 << MVPP22_AXI_ATTR_DOMAIN_OFFS) + \
+ (0x3 << MVPP22_AXI_ATTR_CACHE_OFFS))
+
+#define MVPP22_AXI_ATTR_SW_COH_WRITE ((0x0 << MVPP22_AXI_ATTR_DOMAIN_OFFS) + \
+ (0x7 << MVPP22_AXI_ATTR_CACHE_OFFS))
+
+#define MVPP22_AXI_ATTR_SW_COH_READ ((0x0 << MVPP22_AXI_ATTR_DOMAIN_OFFS) + \
+ (0xB << MVPP22_AXI_ATTR_CACHE_OFFS))
+
+#define MVPP22_AXI_ATTR_HW_COH_WRITE ((0x2 << MVPP22_AXI_ATTR_DOMAIN_OFFS) + \
+ (0x7 << MVPP22_AXI_ATTR_CACHE_OFFS))
+
+#define MVPP22_AXI_ATTR_HW_COH_READ ((0x2 << MVPP22_AXI_ATTR_DOMAIN_OFFS) + \
+ (0xB << MVPP22_AXI_ATTR_CACHE_OFFS))
+
+#define MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT BIT(16)
+
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
+#define MVPP22_AXI_WR_DEP_CODE_REG 0x4168
+
+#define MVPP22_AXI_CODE_CACHE_OFFS 0
+#define MVPP22_AXI_CODE_CACHE_SIZE 4
+#define MVPP22_AXI_CODE_CACHE_MASK AUTO_MASK(MVPP22_AXI_CODE_CACHE)
+
+#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
+#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xB
+#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
+
+#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
+#define MVPP22_AXI_CODE_DOMAIN_SIZE 2
+#define MVPP22_AXI_CODE_DOMAIN_MASK AUTO_MASK(MVPP22_AXI_CODE_DOMAIN)
+
+#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
+#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
+#define MVPP22_AXI_CODE_DOMAIN_NON_SHARE 0
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
+#define MVPP2_PRS_PORT_LU_MAX 0xf
+#define MVPP2_PRS_MAX_LOOP_MIN 0x1
+#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
+#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
+#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
+#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_INIT_OFF_BITS 6
+#define MVPP2_PRS_INIT_OFF_MAX ((1 << MVPP2_PRS_INIT_OFF_BITS) - 1)
+#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
+#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
+#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
+
+#define MVPP2_PRS_EXP_REG 0x1214
+#define MVPP2_PRS_EXP_MISS 0
+#define MVPP2_PRS_EXP_EXEED 1
+#define MVPP2_PRS_EXP_OF 2
+
+#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
+#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+#define MVPP2_PRS_INTR_CAUSE_REG (0x1020)
+#define MVPP2_PRS_INTR_MASK_REG (0x1024)
+
+/*PPv2.1 MASS 3.20 new feature */
+#define MVPP2_PRS_TCAM_HIT_IDX_REG 0x1240
+/*----------------------------------------------------------------------*/
+/*PPv2.1 MASS 3.20 new feature */
+#define MVPP2_PRS_TCAM_HIT_CNT_REG 0x1244
+#define MVPP2_PRS_TCAM_HIT_CNT_BITS 16
+#define MVPP2_PRS_TCAM_HIT_CNT_OFFS 0
+#define MVPP2_PRS_TCAM_HIT_CNT_MASK \
+ (((1 << MVPP2_PRS_TCAM_HIT_CNT_BITS) - 1) << \
+ MVPP2_PRS_TCAM_HIT_CNT_OFFS)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG 0x1800
+#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG 0x1810
+#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG 0x1814
+#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
+#define MVPP2_CLS_LKP_INDEX_LKP_OFFS 0
+#define MVPP2_CLS_LKP_TBL_REG 0x1818
+#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
+#define MVPP22_CLS_LKP_TBL_SEL_REG 0x181c
+#define MVPP22_CLS_LKP_TBL_SEL_CDT_MASK BIT(0)
+#define MVPP22_CLS_LKP_TBL_SEL_FDT_MASK BIT(1)
+#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+
+#define MVPP2_CLS_PORT_SPID_REG 0x1830
+
+#define MVPP2_CLS_PORT_SPID_BITS 2
+#define MVPP2_CLS_PORT_SPID_MAX \
+ ((1 << MVPP2_CLS_PORT_SPID_BITS) - 1)
+#define MVPP2_CLS_PORT_SPID_MASK(port) ((MVPP2_CLS_PORT_SPID_MAX) << \
+ ((port) * MVPP2_CLS_PORT_SPID_BITS))
+#define MVPP2_CLS_PORT_SPID_VAL(port, val) ((val) << \
+ ((port) * MVPP2_CLS_PORT_SPID_BITS))
+
+/* PORT - SPID types */
+#define MVPP2_PORT_SPID_MH 0
+#define MVPP2_PORT_SPID_EXT_SWITCH 1
+#define MVPP2_PORT_SPID_CAS_SWITCH 2
+#define MVPP2_PORT_SPID_PORT_TRUNK 3
+/*----------------------------------------------------------------------*/
+
+#define MVPP2_CLS_SPID_UNI_BASE_REG 0x1840
+#define MVPP2_CLS_SPID_UNI_REG(spid) (MVPP2_CLS_SPID_UNI_BASE_REG + \
+ (((spid) >> 3) * 4))
+
+#define MVPP2_CLS_SPID_MAX 31
+#define MVPP2_CLS_SPID_UNI_REGS 4
+#define MVPP2_CLS_SPID_UNI_BITS 3
+#define MVPP2_CLS_SPID_UNI_FIXED_BITS 4
+#define MVPP2_CLS_SPID_UNI_MAX ((1 << \
+ MVPP2_CLS_SPID_UNI_BITS) - 1)
+#define MVPP2_CLS_SPID_UNI_OFFS(spid) (((spid) % 8) * \
+ MVPP2_CLS_SPID_UNI_FIXED_BITS)
+#define MVPP2_CLS_SPID_UNI_MASK(spid) ((MVPP2_CLS_SPID_UNI_MAX) << \
+ (MVPP2_CLS_SPID_UNI_OFFS(spid)))
+#define MVPP2_CLS_SPID_UNI_VAL(spid, val) ((val) << \
+ (MVPP2_CLS_SPID_UNI_OFFS(spid)))
+
+/*----------------------------------------------------------------------*/
+#define MVPP2_CLS_GEM_VIRT_INDEX_REG 0x1A00
+#define MVPP2_CLS_GEM_VIRT_INDEX_BITS (7)
+#define MVPP2_CLS_GEM_VIRT_INDEX_MAX (((1 << \
+ MVPP2_CLS_GEM_VIRT_INDEX_BITS) - 1) << 0)
+
+/*----------------------------------------------------------------------*/
+
+/* indirect rd/wr via index GEM_VIRT_INDEX */
+#define MVPP2_CLS_GEM_VIRT_REGS_NUM 128
+#define MVPP2_CLS_GEM_VIRT_REG 0x1A04
+
+#define MVPP2_CLS_GEM_VIRT_BITS 12
+#define MVPP2_CLS_GEM_VIRT_MAX ((1 << \
+ MVPP2_CLS_GEM_VIRT_BITS) - 1)
+#define MVPP2_CLS_GEM_VIRT_MASK (((1 << \
+ MVPP2_CLS_GEM_VIRT_BITS) - 1) << 0)
+
+/*----------------------------------------------------------------------*/
+#define MVPP2_CLS_UDF_BASE_REG 0x1860
+#define MVPP2_CLS_UDF_REG(index) (MVPP2_CLS_UDF_BASE_REG + \
+ ((index) * 4)) /*index <=63*/
+#define MVPP2_CLS_UDF_REGS_NUM 64
+
+#define MVPP2_CLS_UDF_BASE_REGS 8
+#define MVPP2_CLS_UDF_OFFSET_ID_OFFS 0
+#define MVPP2_CLS_UDF_OFFSET_ID_BITS 4
+#define MVPP2_CLS_UDF_OFFSET_ID_MAX ((1 << \
+ MVPP2_CLS_UDF_OFFSET_ID_BITS) - 1)
+#define MVPP2_CLS_UDF_OFFSET_ID_MASK \
+ ((MVPP2_CLS_UDF_OFFSET_ID_MAX) << MVPP2_CLS_UDF_OFFSET_ID_OFFS)
+
+#define MVPP2_CLS_UDF_OFFSET_PACKET 0
+#define MVPP2_CLS_UDF_OFFSET_L3 1
+#define MVPP2_CLS_UDF_OFFSET_L4 4
+#define MVPP2_CLS_UDF_OFFSET_OUTVLAN 8
+#define MVPP2_CLS_UDF_OFFSET_INVLAN 9
+#define MVPP2_CLS_UDF_OFFSET_ETHTYPE 0xa
+
+#define MVPP2_CLS_UDF_REL_OFFSET_OFFS 4
+#define MVPP2_CLS_UDF_REL_OFFSET_BITS 11
+#define MVPP2_CLS_UDF_REL_OFFSET_MAX ((1 << \
+ MVPP2_CLS_UDF_REL_OFFSET_BITS) - 1)
+#define MVPP2_CLS_UDF_REL_OFFSET_MASK \
+ ((MVPP2_CLS_UDF_REL_OFFSET_MAX) << MVPP2_CLS_UDF_REL_OFFSET_OFFS)
+
+#define MVPP2_CLS_UDF_SIZE_OFFS 16
+#define MVPP2_CLS_UDF_SIZE_BITS 8
+#define MVPP2_CLS_UDF_SIZE_MAX ((1 << \
+ MVPP2_CLS_UDF_SIZE_BITS) - 1)
+#define MVPP2_CLS_UDF_SIZE_MASK (((1 << \
+ MVPP2_CLS_UDF_SIZE_BITS) - 1) << MVPP2_CLS_UDF_SIZE_OFFS)
+/*----------------------------------------------------------------------*/
+
+#define MVPP2_CLS_MTU_BASE_REG 0x1900
+/* In PPv2.1 (feature MAS 3.7) num indicates an MTU register index.
+ * In PPv2.0 num (<=31) indicates an eport number: 0-15 PON txq, 16-23 ethernet.
+ */
+#define MVPP2_CLS_MTU_REG(num) (MVPP2_CLS_MTU_BASE_REG + \
+ ((num) * 4))
+#define MVPP2_CLS_MTU_OFFS 0
+#define MVPP2_CLS_MTU_BITS 16
+#define MVPP2_CLS_MTU_MAX ((1 << \
+ MVPP2_CLS_MTU_BITS) - 1)
+#define MVPP2_CLS_MTU_MASK (((1 << \
+ MVPP2_CLS_MTU_BITS) - 1) << MVPP2_CLS_MTU_OFFS)
+/*----------------------------------------------------------------------*/
+
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
+#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+
+/*PPv2.1 new feature MAS 3.14*/
+#define MVPP2_CLS_SEQ_SIZE_REG 0x19D4
+#define MVPP2_CLS_SEQ_SIZE_BITS 4
+#define MVPP2_CLS_SEQ_INDEX_MAX 7
+#define MVPP2_CLS_SEQ_SIZE_MAX 8
+#define MVPP2_CLS_SEQ_SIZE_MASK(index) \
+ (((1 << MVPP2_CLS_SEQ_SIZE_BITS) - 1) << \
+ (MVPP2_CLS_SEQ_SIZE_BITS * (index)))
+#define MVPP2_CLS_SEQ_SIZE_VAL(index, val) ((val) << ((index) * \
+ MVPP2_CLS_SEQ_SIZE_BITS))
+
+/*PPv2.1 new register MAS 3.18*/
+#define MVPP2_CLS_PCTRL_BASE_REG 0x1880
+#define MV_PP2_CLS_PCTRL_REG(port) (MVPP2_CLS_PCTRL_BASE_REG + \
+ 4 * (port))
+#define MVPP2_CLS_PCTRL_MH_OFFS 0
+#define MVPP2_CLS_PCTRL_MH_BITS 16
+#define MVPP2_CLS_PCTRL_MH_MASK (((1 << \
+ MVPP2_CLS_PCTRL_MH_BITS) - 1) << MVPP2_CLS_PCTRL_MH_OFFS)
+
+#define MVPP2_CLS_PCTRL_VIRT_EN_OFFS 16
+#define MVPP2_CLS_PCTRL_VIRT_EN_MASK (1 << \
+ MVPP2_CLS_PCTRL_VIRT_EN_OFFS)
+
+#define MVPP2_CLS_PCTRL_UNI_EN_OFFS 17
+#define MVPP2_CLS_PCTRL_UNI_EN_MASK (1 << \
+ MVPP2_CLS_PCTRL_UNI_EN_OFFS)
+
+/*----------------------------------------------------------------------*/
+
+#define MV_PP2_OVERRUN_DROP_REG(port) (0x7000 + 4 * (port))
+#define MV_PP2_CLS_DROP_REG(port) (0x7020 + 4 * (port))
+
+#define MVPP2_CNT_IDX_REG 0x7040
+/* LKP counters index */
+#define MVPP2_CNT_IDX_LKP(lkp, way) ((way) << 6 | (lkp))
+/* Flow counters index */
+#define MVPP2_CNT_IDX_FLOW(index) (index)
+/* TX counters index */
+#define MVPP2_CNT_IDX_TX(port, txq) (((16 + port) << 3) | (txq))
+
+#define MVPP2_TX_DESC_ENQ_REG 0x7100
+#define MVPP2_TX_DESC_ENQ_TO_DRAM_REG 0x7104
+#define MVPP2_TX_BUF_ENQ_TO_DRAM_REG 0x7108
+#define MVPP2_TX_DESC_HWF_ENQ_REG 0x710c
+#define MVPP2_RX_DESC_ENQ_REG 0x7120
+#define MVPP2_TX_PKT_DQ_REG 0x7130
+
+#define MVPP2_TX_PKT_FULLQ_DROP_REG 0x7200
+#define MVPP2_TX_PKT_EARLY_DROP_REG 0x7204
+#define MVPP2_TX_PKT_BM_DROP_REG 0x7208
+#define MVPP2_TX_PKT_BM_MC_DROP_REG 0x720c
+#define MVPP2_RX_PKT_FULLQ_DROP_REG 0x7220
+#define MVPP2_RX_PKT_EARLY_DROP_REG 0x7224
+#define MVPP2_RX_PKT_BM_DROP_REG 0x7228
+
+#define MVPP2_BM_DROP_CNTR_REG(pool) (0x7300 + 4 * (pool))
+#define MVPP2_BM_MC_DROP_CNTR_REG(pool) (0x7340 + 4 * (pool))
+
+#define MVPP2_PLCR_GREEN_CNTR_REG(plcr) (0x7400 + 4 * (plcr))
+#define MVPP2_PLCR_YELLOW_CNTR_REG(plcr) (0x7500 + 4 * (plcr))
+#define MVPP2_PLCR_RED_CNTR_REG(plcr) (0x7600 + 4 * (plcr))
+
+#define MVPP2_CLS_LKP_TBL_HIT_REG 0x7700
+#define MVPP2_CLS_FLOW_TBL_HIT_REG 0x7704
+#define MVPP2_CLS4_TBL_HIT_REG 0x7708
+
+#define MVPP2_V1_OVERFLOW_MC_DROP_REG 0x770c
+
+/* Classifier C2 Engine Registers */
+#define MVPP2_CLS2_TCAM_IDX_REG 0x1B00
+#define MVPP2_CLS2_TCAM_DATA_REG(idx) (0x1B10 + (idx) * 4)
+#define MVPP2_CLS2_TCAM_INV_REG 0x1B24
+#define MVPP2_CLS2_TCAM_INV_INVALID_OFF 31
+#define MVPP2_CLS2_TCAM_INV_INVALID_MASK BIT(31)
+#define MVPP2_CLS2_ACT_DATA_REG 0x1B30
+#define MVPP2_CLS2_ACT_DATA_TBL_ID_OFF 0
+#define MVPP2_CLS2_ACT_DATA_TBL_ID_MASK 0x3F
+#define MVPP2_CLS2_ACT_DATA_TBL_SEL_OFF 6
+#define MVPP2_CLS2_ACT_DATA_TBL_SEL_MASK 0x40
+#define MVPP2_CLS2_ACT_DATA_TBL_PRI_DSCP_OFF 7
+#define MVPP2_CLS2_ACT_DATA_TBL_PRI_DSCP_MASK 0x80
+#define MVPP2_CLS2_ACT_DATA_TBL_GEM_ID_OFF 8
+#define MVPP2_CLS2_ACT_DATA_TBL_GEM_ID_MASK 0x100
+#define MVPP2_CLS2_ACT_DATA_TBL_LOW_Q_OFF 9
+#define MVPP2_CLS2_ACT_DATA_TBL_LOW_Q_MASK 0x200
+#define MVPP2_CLS2_ACT_DATA_TBL_HIGH_Q_OFF 10
+#define MVPP2_CLS2_ACT_DATA_TBL_HIGH_Q_MASK 0x400
+#define MVPP2_CLS2_ACT_DATA_TBL_COLOR_OFF 11
+#define MVPP2_CLS2_ACT_DATA_TBL_COLOR_MASK 0x800
+#define MVPP2_CLS2_DSCP_PRI_INDEX_REG 0x1B40
+#define MVPP2_CLS2_DSCP_PRI_INDEX_LINE_OFF 0
+#define MVPP2_CLS2_DSCP_PRI_INDEX_LINE_BITS 6
+#define MVPP2_CLS2_DSCP_PRI_INDEX_LINE_MASK 0x0000003f
+#define MVPP2_CLS2_DSCP_PRI_INDEX_SEL_OFF 6
+#define MVPP2_CLS2_DSCP_PRI_INDEX_SEL_MASK BIT(6)
+#define MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF 8
+#define MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_BITS 6
+#define MVPP2_CLS2_DSCP_PRI_INDEX_TBL_ID_MASK 0x00003f00
+#define MVPP2_CLS2_QOS_TBL_REG 0x1B44
+#define MVPP2_CLS2_QOS_TBL_PRI_OFF 0
+#define MVPP2_CLS2_QOS_TBL_PRI_BITS 3
+#define MVPP2_CLS2_QOS_TBL_PRI_MASK 0x00000007
+#define MVPP2_CLS2_QOS_TBL_DSCP_OFF 3
+#define MVPP2_CLS2_QOS_TBL_DSCP_BITS 6
+#define MVPP2_CLS2_QOS_TBL_DSCP_MASK 0x000001f8
+#define MVPP2_CLS2_QOS_TBL_COLOR_OFF 9
+#define MVPP2_CLS2_QOS_TBL_COLOR_BITS 3
+#define MVPP2_CLS2_QOS_TBL_COLOR_MASK 0x00000e00
+#define MVPP2_CLS2_QOS_TBL_GEMPORT_OFF 12
+#define MVPP2_CLS2_QOS_TBL_GEMPORT_BITS 12
+#define MVPP2_CLS2_QOS_TBL_GEMPORT_MASK 0x00fff000
+#define MVPP2_CLS2_QOS_TBL_QUEUENUM_OFF 24
+#define MVPP2_CLS2_QOS_TBL_QUEUENUM_BITS 8
+#define MVPP2_CLS2_QOS_TBL_QUEUENUM_MASK 0xff000000
+#define MVPP2_CLS2_HIT_CTR_REG 0x1B50
+#define MVPP2_CLS2_HIT_CTR_OFF 0
+#define MVPP2_CLS2_HIT_CTR_BITS 32
+#define MVPP2_CLS2_HIT_CTR_MASK 0xffffffff
+#define MVPP2_CLS2_HIT_CTR_CLR_REG 0x1B54
+#define MVPP2_CLS2_HIT_CTR_CLR_CLR_OFF 0
+#define MVPP2_CLS2_HIT_CTR_CLR_CLR_MASK BIT(0)
+#define MVPP2_CLS2_HIT_CTR_CLR_DONE_OFF 1
+#define MVPP2_CLS2_HIT_CTR_CLR_DONE_MASK BIT(1)
+#define MVPP2_CLS2_ACT_REG 0x1B60
+#define MVPP2_CLS2_ACT_COLOR_OFF 0
+#define MVPP2_CLS2_ACT_COLOR_BITS 3
+#define MVPP2_CLS2_ACT_COLOR_MASK 0x00000007
+#define MVPP2_CLS2_ACT_PRI_OFF 3
+#define MVPP2_CLS2_ACT_PRI_BITS 2
+#define MVPP2_CLS2_ACT_PRI_MASK 0x00000018
+#define MVPP2_CLS2_ACT_DSCP_OFF 5
+#define MVPP2_CLS2_ACT_DSCP_BITS 2
+#define MVPP2_CLS2_ACT_DSCP_MASK 0x00000060
+#define MVPP2_CLS2_ACT_GEM_OFF 7
+#define MVPP2_CLS2_ACT_GEM_BITS 2
+#define MVPP2_CLS2_ACT_GEM_MASK 0x00000180
+#define MVPP2_CLS2_ACT_QL_OFF 9
+#define MVPP2_CLS2_ACT_QL_BITS 2
+#define MVPP2_CLS2_ACT_QL_MASK 0x00000600
+#define MVPP2_CLS2_ACT_QH_OFF 11
+#define MVPP2_CLS2_ACT_QH_BITS 2
+#define MVPP2_CLS2_ACT_QH_MASK 0x00001800
+#define MVPP2_CLS2_ACT_FRWD_OFF 13
+#define MVPP2_CLS2_ACT_FRWD_BITS 3
+#define MVPP2_CLS2_ACT_FRWD_MASK 0x0000e000
+#define MVPP2_CLS2_ACT_PLCR_OFF 16
+#define MVPP2_CLS2_ACT_PLCR_BITS 2
+#define MVPP2_CLS2_ACT_PLCR_MASK 0x00030000
+#define MVPP2_CLS2_ACT_FLD_EN_OFF 18
+#define MVPP2_CLS2_ACT_FLD_EN_BITS 1
+#define MVPP2_CLS2_ACT_FLD_EN_MASK 0x00040000
+#define MVPP2_CLS2_ACT_RSS_OFF 19
+#define MVPP2_CLS2_ACT_RSS_BITS 2
+#define MVPP2_CLS2_ACT_RSS_MASK 0x00180000
+#define MVPP2_CLS2_ACT_QOS_ATTR_REG 0x1B64
+#define MVPP2_CLS2_ACT_QOS_ATTR_PRI_OFF 0
+#define MVPP2_CLS2_ACT_QOS_ATTR_PRI_BITS 3
+#define MVPP2_CLS2_ACT_QOS_ATTR_PRI_MASK 0x00000007
+#define MVPP2_CLS2_ACT_QOS_ATTR_PRI_MAX ((1 << MVPP2_CLS2_ACT_QOS_ATTR_PRI_BITS) - 1)
+#define MVPP2_CLS2_ACT_QOS_ATTR_DSCP_OFF 3
+#define MVPP2_CLS2_ACT_QOS_ATTR_DSCP_BITS 6
+#define MVPP2_CLS2_ACT_QOS_ATTR_DSCP_MASK 0x000001f8
+#define MVPP2_CLS2_ACT_QOS_ATTR_DSCP_MAX ((1 << MVPP2_CLS2_ACT_QOS_ATTR_DSCP_BITS) - 1)
+#define MVPP2_CLS2_ACT_QOS_ATTR_GEM_OFF 9
+#define MVPP2_CLS2_ACT_QOS_ATTR_GEM_BITS 12
+#define MVPP2_CLS2_ACT_QOS_ATTR_GEM_MASK 0x001ffe00
+#define MVPP2_CLS2_ACT_QOS_ATTR_GEM_MAX ((1 << MVPP2_CLS2_ACT_QOS_ATTR_GEM_BITS) - 1)
+#define MVPP2_CLS2_ACT_QOS_ATTR_QL_OFF 21
+#define MVPP2_CLS2_ACT_QOS_ATTR_QL_BITS 3
+#define MVPP2_CLS2_ACT_QOS_ATTR_QL_MASK 0x00e00000
+#define MVPP2_CLS2_ACT_QOS_ATTR_QH_OFF 24
+#define MVPP2_CLS2_ACT_QOS_ATTR_QH_BITS 5
+#define MVPP2_CLS2_ACT_QOS_ATTR_QH_MASK 0x1f000000
+#define MVPP2_CLS2_ACT_HWF_ATTR_REG 0x1B68
+#define MVPP2_CLS2_ACT_HWF_ATTR_DPTR_OFF 1
+#define MVPP2_CLS2_ACT_HWF_ATTR_DPTR_BITS 15
+#define MVPP2_CLS2_ACT_HWF_ATTR_DPTR_MASK 0x0000fffe
+#define MVPP2_CLS2_ACT_HWF_ATTR_DPTR_MAX ((1 << MVPP2_CLS2_ACT_HWF_ATTR_DPTR_BITS) - 1)
+#define MVPP2_CLS2_ACT_HWF_ATTR_IPTR_OFF 16
+#define MVPP2_CLS2_ACT_HWF_ATTR_IPTR_BITS 8
+#define MVPP2_CLS2_ACT_HWF_ATTR_IPTR_MASK 0x00ff0000
+#define MVPP2_CLS2_ACT_HWF_ATTR_IPTR_MAX ((1 << MVPP2_CLS2_ACT_HWF_ATTR_IPTR_BITS) - 1)
+#define MVPP2_CLS2_ACT_HWF_ATTR_L4CHK_OFF 24
+#define MVPP2_CLS2_ACT_HWF_ATTR_L4CHK_BITS 1
+#define MVPP2_CLS2_ACT_HWF_ATTR_L4CHK_MASK 0x01000000
+#define MVPP2_CLS2_ACT_HWF_ATTR_MTUIDX_OFF 25
+#define MVPP2_CLS2_ACT_HWF_ATTR_MTUIDX_BITS 4
+#define MVPP2_CLS2_ACT_HWF_ATTR_MTUIDX_MASK 0x1e000000
+#define MVPP2_CLS2_ACT_DUP_ATTR_REG 0x1B6C
+#define MVPP2_CLS2_ACT_DUP_ATTR_DUPID_OFF 0
+#define MVPP2_CLS2_ACT_DUP_ATTR_DUPID_BITS 8
+#define MVPP2_CLS2_ACT_DUP_ATTR_DUPID_MASK 0x000000ff
+#define MVPP2_CLS2_ACT_DUP_ATTR_DUPCNT_OFF 8
+#define MVPP2_CLS2_ACT_DUP_ATTR_DUPCNT_BITS 4
+#define MVPP2_CLS2_ACT_DUP_ATTR_DUPCNT_MASK 0x00000f00
+#define MVPP2_CLS2_ACT_DUP_ATTR_PLCRID_OFF 24
+#define MVPP2_CLS2_ACT_DUP_ATTR_PLCRID_BITS 5
+#define MVPP2_CLS2_ACT_DUP_ATTR_PLCRID_MASK 0x1f000000
+#define MVPP2_CLS2_ACT_DUP_ATTR_PLCRBK_OFF 29
+#define MVPP2_CLS2_ACT_DUP_ATTR_PLCRBK_BITS 1
+#define MVPP2_CLS2_ACT_DUP_ATTR_PLCRBK_MASK 0x20000000
+#define MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_OFF 30
+#define MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_BITS 1
+#define MVPP2_CLS2_ACT_DUP_ATTR_RSSEN_MASK 0x40000000
+#define MVPP21_CLS2_ACT_SEQ_ATTR_REG 0x1B70
+#define MVPP21_CLS2_ACT_SEQ_ATTR_ID 0
+#define MVPP21_CLS2_ACT_SEQ_ATTR_ID_BITS 8
+#define MVPP21_CLS2_ACT_SEQ_ATTR_ID_MASK 0x000000ff
+#define MVPP21_CLS2_ACT_SEQ_ATTR_MISS_OFF 8
+#define MVPP21_CLS2_ACT_SEQ_ATTR_MISS_BITS 1
+#define MVPP21_CLS2_ACT_SEQ_ATTR_MISS_MASK 0x00000100
+#define MVPP22_CLS2_ACT_SEQ_ATTR_REG 0x1B70
+#define MVPP22_CLS2_ACT_SEQ_ATTR_ID 0
+#define MVPP22_CLS2_ACT_SEQ_ATTR_ID_BITS 16
+#define MVPP22_CLS2_ACT_SEQ_ATTR_ID_MASK 0x0000ffff
+#define MVPP22_CLS2_ACT_SEQ_ATTR_MISS_OFF 16
+#define MVPP22_CLS2_ACT_SEQ_ATTR_MISS_BITS 1
+/* Single miss bit at offset 16, i.e. BIT(16); was 0x0001000 (bit 12) */
+#define MVPP22_CLS2_ACT_SEQ_ATTR_MISS_MASK 0x00010000
+#define MVPP2_CLS2_TCAM_CFG0_REG 0x1b80
+#define MVPP2_CLS2_TCAM_CFG0_EN_OFF 0
+#define MVPP2_CLS2_TCAM_CFG0_EN_MASK 0x00000001
+#define MVPP2_CLS2_TCAM_CFG0_SIZE_OFF 1
+#define MVPP2_CLS2_TCAM_CFG0_SIZE_MASK 0x0000001e
+#define MVPP2_CLS2_TCAM_CTRL_REG 0x1B90
+#define MVPP2_CLS2_TCAM_CTRL_EN_OFF 0
+#define MVPP2_CLS2_TCAM_CTRL_EN_MASK 0x0000001
+
+/* Classifier C2 QOS Table (DSCP/PRI Table) */
+#define MVPP2_QOS_TBL_LINE_NUM_PRI 8
+#define MVPP2_QOS_TBL_NUM_PRI 64
+#define MVPP2_QOS_TBL_LINE_NUM_DSCP 64
+#define MVPP2_QOS_TBL_NUM_DSCP 8
+
+/*------------------Classifier C3 Top Registers---------------------------*/
+#define MVPP2_CLS3_KEY_CTRL_REG 0x1C10
+#define KEY_CTRL_L4 0
+#define KEY_CTRL_L4_BITS 3
+#define KEY_CTRL_L4_MAX ((1 << \
+ KEY_CTRL_L4_BITS) - 1)
+#define KEY_CTRL_L4_MASK (((1 << \
+ KEY_CTRL_L4_BITS) - 1) << KEY_CTRL_L4)
+#define KEY_CTRL_LKP_TYPE 4
+#define KEY_CTRL_LKP_TYPE_BITS 6
+#define KEY_CTRL_LKP_TYPE_MAX ((1 << \
+ KEY_CTRL_LKP_TYPE_BITS) - 1)
+#define KEY_CTRL_LKP_TYPE_MASK (((1 << \
+ KEY_CTRL_LKP_TYPE_BITS) - 1) << KEY_CTRL_LKP_TYPE)
+#define KEY_CTRL_PRT_ID_TYPE 12
+#define KEY_CTRL_PRT_ID_TYPE_BITS 2
+#define KEY_CTRL_PRT_ID_TYPE_MAX ((1 << \
+ KEY_CTRL_PRT_ID_TYPE_BITS) - 1)
+#define KEY_CTRL_PRT_ID_TYPE_MASK ((KEY_CTRL_PRT_ID_TYPE_MAX) << \
+ KEY_CTRL_PRT_ID_TYPE)
+#define KEY_CTRL_PRT_ID 16
+#define KEY_CTRL_PRT_ID_BITS 8
+#define KEY_CTRL_PRT_ID_MAX ((1 << \
+ KEY_CTRL_PRT_ID_BITS) - 1)
+#define KEY_CTRL_PRT_ID_MASK (((1 << \
+ KEY_CTRL_PRT_ID_BITS) - 1) << KEY_CTRL_PRT_ID)
+#define KEY_CTRL_HEK_SIZE 24
+#define KEY_CTRL_HEK_SIZE_BITS 6
+#define KEY_CTRL_HEK_SIZE_MAX 36
+#define KEY_CTRL_HEK_SIZE_MASK (((1 << \
+ KEY_CTRL_HEK_SIZE_BITS) - 1) << KEY_CTRL_HEK_SIZE)
+
+#define MVPP2_CLS3_KEY_HEK_REG(reg_num) (0x1C34 - 4 * (reg_num))
+
+#define MVPP2_CLS3_QRY_ACT_REG 0x1C40
+#define MVPP2_CLS3_QRY_ACT 0
+
+#define MVPP2_CLS3_QRY_RES_HASH_REG(hash) (0x1C50 + 4 * (hash))
+#define MVPP2_CLS3_HASH_BANKS_NUM 8
+
+#define MVPP2_CLS3_INIT_HIT_CNT_REG 0x1C80
+#define MVPP2_CLS3_INIT_HIT_CNT_OFFS 6
+#define MVPP2_CLS3_INIT_HIT_CNT_BITS 18
+#define MVPP2_CLS3_INIT_HIT_CNT_MASK (((1 << \
+ MVPP2_CLS3_INIT_HIT_CNT_BITS) - 1) << MVPP2_CLS3_INIT_HIT_CNT_OFFS)
+#define MVPP2_CLS3_INIT_HIT_CNT_MAX ((1 << \
+ MVPP2_CLS3_INIT_HIT_CNT_BITS) - 1)
+
+#define MVPP2_CLS3_HASH_OP_REG 0x1C84
+#define MVPP2_CLS3_HASH_OP_TBL_ADDR 0
+#define MVPP2_CLS3_HASH_OP_TBL_ADDR_BITS 12
+#define MVPP2_CLS3_HASH_OP_TBL_ADDR_MAX ((1 << \
+ MVPP2_CLS3_HASH_OP_TBL_ADDR_BITS) - 1)
+#define MVPP2_CLS3_HASH_OP_TBL_ADDR_MASK \
+ ((MVPP2_CLS3_HASH_OP_TBL_ADDR_MAX) << MVPP2_CLS3_HASH_OP_TBL_ADDR)
+#define MVPP2_CLS3_MISS_PTR 12
+#define MVPP2_CLS3_MISS_PTR_MASK BIT(MVPP2_CLS3_MISS_PTR)
+#define MVPP2_CLS3_HASH_OP_DEL 14
+#define MVPP2_CLS3_HASH_OP_ADD 15
+#define MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR 16
+#define MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR_BITS 8
+#define MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR_MAX ((1 << \
+ MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR_BITS) - 1)
+#define MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR_MASK \
+ ((MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR_MAX) << \
+ MVPP2_CLS3_HASH_OP_EXT_TBL_ADDR)
+
+#define MVPP2_CLS3_STATE_REG 0x1C8C
+#define MVPP2_CLS3_STATE_CPU_DONE 0
+#define MVPP2_CLS3_STATE_CPU_DONE_MASK (1 << \
+ MVPP2_CLS3_STATE_CPU_DONE)
+#define MVPP2_CLS3_STATE_CLEAR_CTR_DONE 1
+#define MVPP2_CLS3_STATE_CLEAR_CTR_DONE_MASK (1 << \
+ MVPP2_CLS3_STATE_CLEAR_CTR_DONE)
+#define MVPP2_CLS3_STATE_SC_DONE 2
+#define MVPP2_CLS3_STATE_SC_DONE_MASK BIT(MVPP2_CLS3_STATE_SC_DONE)
+#define MVPP2_CLS3_STATE_OCCIPIED 8
+#define MVPP2_CLS3_STATE_OCCIPIED_BITS 8
+#define MVPP2_CLS3_STATE_OCCIPIED_MASK (((1 << \
+ MVPP2_CLS3_STATE_OCCIPIED_BITS) - 1) << MVPP2_CLS3_STATE_OCCIPIED)
+
+#define MVPP2_CLS3_STATE_SC_STATE 16
+#define MVPP2_CLS3_STATE_SC_STATE_BITS 2
+#define MVPP2_CLS3_STATE_SC_STATE_MASK (((1 << \
+ MVPP2_CLS3_STATE_SC_STATE_BITS) - 1) << MVPP2_CLS3_STATE_SC_STATE)
+
+/* SCAN STATUS (value 2 is not documented here)
+ * 0 - scan complete
+ * 1 - hit counter clear
+ * 3 - scan wait
+ * 4 - scan in progress
+ */
+
+#define MVPP2_CLS3_STATE_NO_OF_SC_RES 20
+#define MVPP2_CLS3_STATE_NO_OF_SC_RES_BITS 9
+#define MVPP2_CLS3_STATE_NO_OF_SC_RES_MASK (((1 << \
+ MVPP2_CLS3_STATE_NO_OF_SC_RES_BITS) - 1) << \
+ MVPP2_CLS3_STATE_NO_OF_SC_RES)
+
+#define MVPP2_CLS3_DB_INDEX_REG 0x1C90
+#define MVPP2_CLS3_DB_MISS_OFFS 12
+#define MVPP2_CLS3_DB_MISS_MASK BIT(MVPP2_CLS3_DB_MISS_OFFS)
+
+ /* valid values for num: 0-3 */
+#define MVPP2_CLS3_HASH_DATA_REG(num) (0x1CA0 + 4 * (num))
+#define MVPP2_CLS3_HASH_DATA_REG_NUM 4
+#define MVPP2_CLS3_HASH_EXT_DATA_REG(num) (0x1CC0 + 4 * (num))
+#define MVPP2_CLS3_HASH_EXT_DATA_REG_NUM 7
+
+#define MVPP2_CLS3_CLEAR_COUNTERS_REG 0x1D00
+#define MVPP2_CLS3_CLEAR_COUNTERS 0
+#define MVPP2_CLS3_CLEAR_COUNTERS_BITS 7
+#define MVPP2_CLS3_CLEAR_ALL 0x3f
+#define MVPP2_CLS3_CLEAR_COUNTERS_MAX 0x3F
+#define MVPP2_CLS3_CLEAR_COUNTERS_MASK \
+ ((MVPP2_CLS3_CLEAR_COUNTERS_MAX) << \
+ MVPP2_CLS3_CLEAR_COUNTERS)
+
+#define MVPP2_CLS3_HIT_COUNTER_REG 0x1D08
+#define MVPP2_CLS3_HIT_COUNTER 0
+#define MVPP2_CLS3_HIT_COUNTER_BITS 24
+#define MVPP2_CLS3_HIT_COUNTER_MAX ((1 << \
+ MVPP2_CLS3_HIT_COUNTER_BITS) - 1)
+#define MVPP2_CLS3_HIT_COUNTER_MASK \
+ ((MVPP2_CLS3_HIT_COUNTER_MAX) << MVPP2_CLS3_HIT_COUNTER)
+
+#define MVPP2_CLS3_SC_PROP_REG 0x1D10
+#define MVPP2_CLS3_SC_PROP_TH_MODE 0
+#define MVPP2_CLS3_SC_PROP_TH_MODE_MASK (1 << \
+ MVPP2_CLS3_SC_PROP_TH_MODE)
+#define MVPP2_CLS3_SC_PROP_CLEAR 1
+#define MVPP2_CLS3_SC_PROP_CLEAR_MASK (1 << \
+ MVPP2_CLS3_SC_PROP_CLEAR)
+#define MVPP2_CLS3_SC_PROP_LKP_TYPE_EN 3
+#define MVPP2_CLS3_SC_PROP_LKP_TYPE_EN_MASK (1 << \
+ MVPP2_CLS3_SC_PROP_LKP_TYPE_EN)
+#define MVPP2_CLS3_SC_PROP_LKP_TYPE 4
+#define MVPP2_CLS3_SC_PROP_LKP_TYPE_BITS 6
+#define MVPP2_CLS3_SC_PROP_LKP_TYPE_MAX ((1 << \
+ MVPP2_CLS3_SC_PROP_LKP_TYPE_BITS) - 1)
+#define MVPP2_CLS3_SC_PROP_LKP_TYPE_MASK \
+ ((MVPP2_CLS3_SC_PROP_LKP_TYPE_MAX) << MVPP2_CLS3_SC_PROP_LKP_TYPE)
+#define MVPP2_CLS3_SC_PROP_START_ENTRY 16
+#define MVPP2_CLS3_SC_PROP_START_ENTRY_MASK \
+ ((MVPP2_CLS3_HASH_OP_TBL_ADDR_MAX) << MVPP2_CLS3_SC_PROP_START_ENTRY)
+
+#define MVPP2_CLS3_SC_PROP_VAL_REG 0x1D14
+#define MVPP2_CLS3_SC_PROP_VAL_DELAY 0
+#define MVPP2_CLS3_SC_PROP_VAL_DELAY_BITS 16
+#define MVPP2_CLS3_SC_PROP_VAL_DELAY_MAX ((1 << \
+ MVPP2_CLS3_SC_PROP_VAL_DELAY_BITS) - 1)
+#define MVPP2_CLS3_SC_PROP_VAL_DELAY_MASK \
+ (MVPP2_CLS3_SC_PROP_VAL_DELAY_MAX << MVPP2_CLS3_SC_PROP_VAL_DELAY)
+
+#define MVPP2_CLS3_SC_TH_REG 0x1D18
+#define MVPP2_CLS3_SC_TH 4
+#define MVPP2_CLS3_SC_TH_BITS 20
+#define MVPP2_CLS3_SC_TH_MAX ((1 << \
+ MVPP2_CLS3_SC_TH_BITS) - 1)
+#define MVPP2_CLS3_SC_TH_MASK (((1 << \
+ MVPP2_CLS3_SC_TH_BITS) - 1) << MVPP2_CLS3_SC_TH)
+
+#define MVPP2_CLS3_SC_TIMER_REG 0x1D1c
+#define MVPP2_CLS3_SC_TIMER 0
+#define MVPP2_CLS3_SC_TIMER_BITS 16
+#define MVPP2_CLS3_SC_TIMER_MASK \
+ (((1 << MVPP2_CLS3_SC_TIMER_BITS) - 1) << MVPP2_CLS3_SC_TIMER)
+
+#define MVPP2_CLS3_SC_ACT_REG 0x1D20
+#define MVPP2_CLS3_SC_ACT 0
+
+#define MVPP2_CLS3_SC_INDEX_REG 0x1D28
+#define MVPP2_CLS3_SC_INDEX 0
+
+#define MVPP2_CLS3_SC_RES_REG 0x1D2C
+#define MVPP2_CLS3_SC_RES_ENTRY 0
+#define MVPP2_CLS3_SC_RES_ENTRY_MASK \
+ ((MVPP2_CLS3_HASH_OP_TBL_ADDR_MAX) << MVPP2_CLS3_SC_RES_ENTRY)
+#define MVPP2_CLS3_SC_RES_CTR 12
+#define MVPP2_CLS3_SC_RES_CTR_MASK \
+ ((MVPP2_CLS3_HIT_COUNTER_MAX) << MVPP2_CLS3_SC_RES_CTR)
+
+#define MVPP2_CLS3_ACT_REG 0x1D40
+
+#define MVPP2_CLS3_ACT_QOS_ATTR_REG 0x1D44
+
+#define MVPP2_CLS3_ACT_HWF_ATTR_REG 0x1D48
+
+#define MVPP2_CLS3_ACT_DUP_ATTR_REG 0x1D4C
+#define MVPP2_CLS3_ACT_SEQ_L_ATTR_REG 0x1D50
+#define MVPP2_CLS3_ACT_SEQ_H_ATTR_REG 0x1D54
+#define MVPP2_CLS3_ACT_SEQ_SIZE 38
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG 0x2040
+
+#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP21_RXQ_DESC_ADDR_SHIFT MVPP21_DESC_ADDR_SHIFT
+#define MVPP21_RXQ_DESC_ADDR_MASK 0xfffffe00
+
+#define MVPP22_RXQ_DESC_ADDR_SHIFT MVPP22_DESC_ADDR_SHIFT
+#define MVPP22_RXQ_DESC_ADDR_MASK 0xfffffffe
+
+#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
+#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
+#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
+#define MVPP2_RXQ_NUM_NEW_OFFSET 16
+#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
+#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
+#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
+#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
+#define MVPP2_RXQ_THRESH_REG 0x204c
+#define MVPP2_OCCUPIED_THRESH_OFFSET 0
+#define MVPP2_MAX_OCCUPIED_THRESH 0x3fff
+#define MVPP2_OCCUPIED_THRESH_MASK MVPP2_MAX_OCCUPIED_THRESH
+#define MVPP2_RXQ_INDEX_REG 0x2050
+#define MVPP2_TXQ_NUM_REG 0x2080
+#define MVPP2_TXQ_DESC_ADDR_LOW_REG 0x2084
+#define MVPP2_TXQ_DESC_ADDR_LOW_SHIFT 0
+#define MVPP2_TXQ_DESC_ADDR_LOW_MASK 0xfffffe00
+#define MVPP22_TXQ_DESC_ADDR_HIGH_REG 0x20a8
+#define MVPP22_TXQ_DESC_ADDR_HIGH_MASK 0xff
+#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
+#define MVPP2_TXQ_DESC_HWF_SIZE_REG 0x208c
+#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
+#define MVPP2_TXQ_THRESH_REG 0x2094
+#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
+#define MVPP2_MAX_TRANSMITTED_THRESH 0x3fff
+#define MVPP2_TRANSMITTED_THRESH_MASK \
+ ((MVPP2_MAX_TRANSMITTED_THRESH) << MVPP2_TRANSMITTED_THRESH_OFFSET)
+#define MVPP2_TXQ_INDEX_REG 0x2098
+#define MVPP2_TXQ_PREF_BUF_REG 0x209c
+#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
+#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
+#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
+#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
+#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
+#define MVPP2_TXQ_PENDING_REG 0x20a0
+#define MVPP2_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
+
+#define MVPP21_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
+#define MVPP21_TRANSMITTED_COUNT_OFFSET 16
+#define MVPP21_TRANSMITTED_COUNT_MASK 0x3fff0000
+#define MVPP22_TXQ_SENT_REG(txq) (0x3e00 + 4 * (txq - 128))
+#define MVPP22_TRANSMITTED_COUNT_OFFSET 16
+#define MVPP22_TRANSMITTED_COUNT_MASK 0x3fff0000
+
+#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
+#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
+#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
+#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
+#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP21_AGGR_TXQ_DESC_ADDR_SHIFT MVPP21_DESC_ADDR_SHIFT
+#define MVPP21_AGGR_TXQ_DESC_ADDR_MASK 0xfffffe00
+#define MVPP22_AGGR_TXQ_DESC_ADDR_SHIFT MVPP22_DESC_ADDR_SHIFT
+#define MVPP22_AGGR_TXQ_DESC_ADDR_MASK 0xfffffffe
+
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE 0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP22_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
+#define MVPP22_MAX_ISR_TX_THRESHOLD 0xfffff0
+#define MVPP22_ISR_TX_THRESHOLD_MASK MVPP22_MAX_ISR_TX_THRESHOLD
+
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
+#define MVPP2_ISR_RX_THRESHOLD_MASK MVPP2_MAX_ISR_RX_THRESHOLD
+
+#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
+/* PPv2.2: one shared index register; subgroup in bits [3:0], group in [9:7] */
+#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
+#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
+#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
+
+#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
+#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
+#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
+
+#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
+#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
+#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(eth_port) (0x5480 + 4 * (eth_port))
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
+
+#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
+#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
+#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
+#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
+#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
+#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
+
+#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
+#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
+#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
+
+#define MV_PP21_ISR_RX_ERR_CAUSE_REG(port) (0x5500 + 4 * (port))
+#define MV_PP21_ISR_RX_ERR_CAUSE_NONOCC_MASK 0xffff
+#define MV_PP21_ISR_RX_ERR_CAUSE_DESC_RES_MASK 0xffff0000
+#define MV_PP21_ISR_RX_ERR_MASK_REG(port) (0x5520 + 4 * (port))
+
+#define MV_PP22_ISR_RX_ERR_CAUSE_REG(port) (0x5500 + 4 * (port))
+#define MV_PP22_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
+#define MV_PP22_ISR_RX_ERR_CAUSE_DESC_RES_MASK 0xff0000
+#define MV_PP22_ISR_RX_ERR_MASK_REG(port) (0x5520 + 4 * (port))
+
+#define MV_PP2_ISR_TX_ERR_CAUSE_REG(eth_port) (0x5540 + 4 * (eth_port))
+#define MV_PP2_ISR_TX_ERR_MASK_REG(eth_port) (0x5560 + 4 * (eth_port))
+
+#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_MISC_MASK_REG 0x55b4
+
+#define MVPP22_ISR_NO_BUF_CAUSE_REG 0x55b8
+#define MVPP22_ISR_NO_BUF_MASK_REG 0x55bc
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_ADDR_REG(pool) (0x6000 + ((pool) * 4))
+#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
+#define MVPP21_BM_POOL_SIZE_MASK 0xfff0
+#define MVPP21_BM_POOL_SIZE_OFFSET 4
+
+#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
+#define MVPP21_BM_POOL_READ_PTR_REG MVPP2_BM_POOL_READ_PTR_REG
+
+#define MVPP21_BM_POOL_GET_READ_PTR_MASK 0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
+#define MVPP21_BM_POOL_PTRS_NUM_REG MVPP2_BM_POOL_PTRS_NUM_REG
+
+#define MVPP21_BM_POOL_PTRS_NUM_MASK 0xfff0
+
+#define MVPP22_BM_POOL_SIZE_MASK 0xfff8
+#define MVPP22_BM_POOL_SIZE_OFFSET 3
+
+/* Use PPV21 Pool Size both for PPV21/PPV22, deliberately ignore PPV22 */
+#define MVPP2_BM_POOL_SIZE_MASK MVPP21_BM_POOL_SIZE_MASK
+#define MVPP2_BM_POOL_SIZE_OFFSET MVPP21_BM_POOL_SIZE_OFFSET
+#undef MVPP22_BM_POOL_SIZE_MASK
+#undef MVPP22_BM_POOL_SIZE_OFFSET
+
+#define MVPP22_BM_POOL_READ_PTR_REG MVPP2_BM_POOL_READ_PTR_REG
+#define MVPP22_BM_POOL_GET_READ_PTR_MASK 0xfff8
+#define MVPP22_BM_POOL_PTRS_NUM_REG MVPP2_BM_POOL_PTRS_NUM_REG
+#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
+
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
+#define MVPP2_BM_START_MASK BIT(0)
+#define MVPP2_BM_STOP_MASK BIT(1)
+#define MVPP2_BM_STATE_MASK BIT(4)
+#define MVPP2_BM_LOW_THRESH_OFFS 8
+#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
+#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
+ MVPP2_BM_LOW_THRESH_OFFS)
+#define MVPP2_BM_HIGH_THRESH_OFFS 16
+#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
+#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
+ MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
+#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
+#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
+#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
+#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
+#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
+
+#define MVPP21_BM_UNUSED_PTR_THRESH_REG(pool) (0x62c0 + ((pool) * 4))
+#define MVPP21_BM_UNUSED_PTR_THRESH_MASK 0xfff0
+#define MVPP22_BM_UNUSED_PTR_THRESH_REG(pool) (0x62c0 + ((pool) * 4))
+#define MVPP22_BM_UNUSED_PTR_THRESH_MASK 0xfff8
+
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_MASK 0xff
+
+#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+
+#define MVPP22_BM_PHY_VIRT_HIGH_ALLOC_REG 0x6444
+#define MVPP22_BM_PHY_HIGH_ALLOC_OFFSET 0
+#define MVPP22_BM_VIRT_HIGH_ALLOC_OFFSET 8
+#define MVPP22_BM_VIRT_HIGH_ALLOC_MASK 0xff00
+#define MVPP22_BM_PHY_HIGH_ALLOC_MASK 0xff
+
+#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
+#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
+#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
+#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
+
+#define MVPP2_BM_VIRT_RLS_REG 0x64c0
+
+#define MVPP21_BM_MC_RLS_REG 0x64c4 /* Not a mixup */
+#define MVPP21_BM_MC_ID_MASK 0xfff
+#define MVPP21_BM_FORCE_RELEASE_MASK BIT(12)
+
+#define MVPP22_BM_PHY_VIRT_HIGH_RLS_REG 0x64c4 /* Not a mixup */
+
+#define MVPP22_BM_PHY_HIGH_RLS_OFFSET 0
+#define MVPP22_BM_VIRT_HIGH_RLS_OFFST 8
+
+#define MVPP22_BM_MC_RLS_REG 0x64d4 /* Not a mixup */
+#define MVPP22_BM_MC_ID_MASK 0xfff
+#define MVPP22_BM_FORCE_RELEASE_MASK BIT(12)
+
+#define MVPP2_BM_PRIO_CTRL_REG 0x6800
+
+#define MVPP2_BM_PRIO_IDX_REG 0x6810
+#define MVPP2_BM_PRIO_IDX_BITS 8
+#define MVPP2_BM_PRIO_IDX_MAX 255
+#define MVPP2_BM_PRIO_IDX_MASK 0xff
+
+#define MVPP2_BM_CPU_QSET_REG 0x6814
+
+#define MVPP2_BM_CPU_SHORT_QSET_OFFS 0
+#define MVPP2_BM_CPU_SHORT_QSET_MASK (0x7f << \
+ MVPP2_BM_CPU_SHORT_QSET_OFFS)
+
+#define MVPP2_BM_CPU_LONG_QSET_OFFS 8
+#define MVPP2_BM_CPU_LONG_QSET_MASK (0x7f << \
+ MVPP2_BM_CPU_LONG_QSET_OFFS)
+
+#define MVPP2_BM_HWF_QSET_REG 0x6818
+
+#define MVPP2_BM_HWF_SHORT_QSET_OFFS 0
+#define MVPP2_BM_HWF_SHORT_QSET_MASK (0x7f << \
+ MVPP2_BM_HWF_SHORT_QSET_OFFS)
+
+#define MVPP2_BM_HWF_LONG_QSET_OFFS 8
+#define MVPP2_BM_HWF_LONG_QSET_MASK (0x7f << \
+ MVPP2_BM_HWF_LONG_QSET_OFFS)
+
+#define MVPP2_BM_QSET_SET_MAX_REG 0x6820
+
+#define MVPP2_BM_QSET_MAX_SHARED_OFFS 0
+#define MVPP2_BM_QSET_MAX_GRNTD_OFFS 16
+
+#define MVPP2_BM_QSET_MAX_SHARED_MASK (0xffff << \
+ MVPP2_BM_QSET_MAX_SHARED_OFFS)
+#define MVPP2_BM_QSET_MAX_GRNTD_MASK (0xffff << \
+ MVPP2_BM_QSET_MAX_GRNTD_OFFS)
+
+#define MVPP2_BM_QSET_SET_CNTRS_REG 0x6824
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
+#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
+#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
+#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_FIXED_PRIO_REG 0x8014
+#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
+#define MVPP2_TXP_SCHED_MTU_REG 0x801c
+#define MVPP2_TXP_MTU_MAX 0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
+#define MVPP2_TXP_REFILL_TOKENS_OFFS 0
+#define MVPP2_TXP_REFILL_TOKENS_MAX 0x7FFFF
+#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXP_REFILL_TOKENS_MASK(val) ((val) << \
+ MVPP2_TXP_REFILL_TOKENS_OFFS)
+#define MVPP2_TXP_REFILL_PERIOD_MAX 0x3FF
+#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
+#define MVPP2_TXP_SCHED_TOKEN_CNTR_REG 0x8028
+#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
+#define MVPP2_TXQ_REFILL_TOKENS_OFFS 0
+#define MVPP2_TXQ_REFILL_TOKENS_MAX 0x7FFFF
+#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXQ_REFILL_TOKENS_MASK(val) ((val) << \
+ MVPP2_TXQ_REFILL_TOKENS_OFFS)
+#define MVPP2_TXQ_REFILL_PERIOD_MAX 0x3FF
+#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
+/* Transmit Queue Arbiter Configuration (TQxAC) */
+#define MVPP2_TXQ_SCHED_WRR_REG(q) (0x80A0 + ((q) << 2))
+#define MVPP2_TXQ_WRR_WEIGHT_OFFS 0
+#define MVPP2_TXQ_WRR_WEIGHT_MAX 0xFF
+#define MVPP2_TXQ_WRR_WEIGHT_ALL_MASK (MVPP2_TXQ_WRR_WEIGHT_MAX << \
+ MVPP2_TXQ_WRR_WEIGHT_OFFS)
+#define MVPP2_TXQ_WRR_WEIGHT_MASK(weight) ((weight) << \
+ MVPP2_TXQ_WRR_WEIGHT_OFFS)
+#define MVPP2_TXQ_WRR_BYTE_COUNT_OFFS 8
+#define MVPP2_TXQ_WRR_BYTE_COUNT_MASK (0x3FFFF << \
+ MVPP2_TXQ_WRR_BYTE_COUNT_OFFS)
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG 0x8800
+#define MVPP2_TX_SNOOP_EN_MASK BIT(0)
+#define MVPP22_TX_SNOOP_HWF_EN_MASK BIT(1)
+
+#define MVPP21_TX_FIFO_THRESH_REG 0x8804
+#define MVPP21_TX_FIFO_THRESH_MASK 0x7ff
+#define MVPP22_TX_FIFO_THRESH_REG(eth_tx_port) (0x8840 + ((eth_tx_port) << 2))
+#define MVPP22_TX_FIFO_THRESH_MASK 0x3fff
+
+#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2))
+#define MVPP22_TX_FIFO_SIZE_MASK 0xf
+
+#define MVPP2_TX_PORT_FLUSH_REG 0x8810
+#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
+
+/* Same for PPv21/PPv22 */
+#define MVPP2_TX_BAD_FCS_CNTR_REG(eth_tx_port) (0x8940 + ((eth_tx_port) << 2))
+/* Same for PPv21/PPv22 */
+#define MVPP2_TX_DROP_CNTR_REG(eth_tx_port) (0x8980 + ((eth_tx_port) << 2))
+
+#define MVPP2_TX_ETH_DSEC_THRESH_REG(eth_tx_port) (0x8a40 + \
+ ((eth_tx_port) << 2))
+#define MVPP2_TX_ETH_DSEC_THRESH_MASK 0x7f0
+
+#define MVPP22_TX_EGR_PIPE_DELAY_REG(eth_tx_port) (0x8a80 + \
+ ((eth_tx_port) << 2))
+#define MVPP22_TX_EGR_PIPE_DELAY_MASK 0x3fff
+#define MVPP22_TX_PTP_DISPATCH_ENABLE_MASK BIT(30)
+
+#define MVPP22_TX_PORT_SHORT_HDR_REG 0x8ac0
+#define MVPP22_TX_PORT_SHORT_HDR_MASK 0x7f
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE 0x24
+#define MVPP2_SRC_ADDR_HIGH 0x28
+#define MVPP2_PHY_AN_CFG0_REG 0x34
+#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
+#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \
+ 0x400 + (port) * 0x400)
+#define MVPP2_MIB_LATE_COLLISION 0x7c
+#define MVPP2_ISR_SUM_MASK_REG 0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG 0x0
+#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
+#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
+#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG 0x4
+#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
+#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
+#define MVPP2_GMAC_PCS_LB_EN_BIT 6
+#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
+#define MVPP2_GMAC_SA_LOW_OFFS 7
+#define MVPP2_GMAC_CTRL_2_REG 0x8
+#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
+#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
+#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
+#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
+#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
+#define MVPP2_GMAC_FC_ADV_EN BIT(9)
+#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
+ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* The two bytes Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically by zeroes on
+ * the RX side. Those two bytes being at the front of the Ethernet
+ * header, they allow to have the IP header aligned on a 4 bytes
+ * boundary automatically: the hardware skips those two bytes on its
+ * own.
+ */
+#define MVPP2_MH_SIZE 2
+#define MVPP2_ETH_TYPE_LEN 2
+#define MVPP2_PPPOE_HDR_SIZE 8
+#define MVPP2_VLAN_TAG_LEN 4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE 0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
+#define MVPP2_TX_CSUM_MAX_SIZE 9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
+
+#define MVPP2_TX_MTU_MAX 0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT 16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS 4
+
+/* Maximum number of supported cells */
+#define MVPP2_MAX_CELLS 4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ 8
+
+/* Maximum number of RXQs used by single port */
+#define MVPP2_MAX_RXQ 8
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ 4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+#define MVPP2_TXQ_TOTAL_NUM (128/*pon*/ + \
+ MVPP2_MAX_PORTS * MVPP2_MAX_TXQ/*eth*/)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD 1024
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD 2048
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK 128
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE 512
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE 32
+#define MVPP2_DESC_Q_ALIGN 512
+
+#define MVPP2_DESCQ_MEM_SIZE(descs) (descs * MVPP2_DESC_ALIGNED_SIZE + \
+ MVPP2_DESC_Q_ALIGN)
+#define MVPP2_DESCQ_MEM_ALIGN(mem) (ALIGN(mem, MVPP2_DESC_Q_ALIGN))
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+ ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+ ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+
+#define MVPP2_RX_MTU_SIZE(pkt_size) \
+ (pkt_size - MVPP2_MH_SIZE - MVPP2_VLAN_TAG_LEN - \
+ ETH_HLEN - ETH_FCS_LEN)
+
+#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+ ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE 16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK BIT(0)
+#define MVPP2_F_IFCAP_NETMAP BIT(1)
+
+/* Marvell tag types */
+enum mv_pp2x_tag_type {
+ MVPP2_TAG_TYPE_NONE = 0,
+ MVPP2_TAG_TYPE_MH = 1,
+ MVPP2_TAG_TYPE_DSA = 2,
+ MVPP2_TAG_TYPE_EDSA = 3,
+ MVPP2_TAG_TYPE_VLAN = 4,
+ MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_PRS_TCAM_WORDS 6
+#define MVPP2_PRS_SRAM_WORDS 4
+#define MVPP2_PRS_FLOW_ID_SIZE 64
+#define MVPP2_PRS_FLOW_ID_MASK 0x3f
+#define MVPP2_PRS_TCAM_ENTRY_VALID 0
+#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
+#define MVPP2_PRS_IPV4_HEAD 0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
+#define MVPP2_PRS_IPV4_MC 0xe0
+#define MVPP2_PRS_IPV4_MC_MASK 0xf0
+#define MVPP2_PRS_IPV4_BC_MASK 0xff
+#define MVPP2_PRS_IPV4_IHL 0x5
+#define MVPP2_PRS_IPV4_IHL_MASK 0xf
+#define MVPP2_PRS_IPV6_MC 0xff
+#define MVPP2_PRS_IPV6_MC_MASK 0xff
+#define MVPP2_PRS_IPV6_HOP_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX 100
+
+/* There is TCAM range reserved for MAC entries, range size is 113
+ * 1 BC MAC entry for all ports
+ * 4 M2M entries, 1 entry per port, and 4 ports in all
+ * 36 UC/MC MAC filter entries per port
+ * It is assumed that there are 3 ports for filter, not including loopback port
+ */
+#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 36
+#define MVPP2_PRS_MAC_RANGE_SIZE 113
+
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_PORT_MASK 0xff
+#define MVPP2_PRS_LU_MASK 0xf
+#define MVPP2_PRS_TCAM_AI_BYTE 16
+#define MVPP2_PRS_TCAM_PORT_BYTE 17
+#define MVPP2_PRS_TCAM_LU_BYTE 20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD 5
+#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
+
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL 0
+#define MVPP2_PE_FIRST_FREE_TID 1
+#define MVPP2_PE_MAC_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END -\
+ MVPP2_PRS_MAC_RANGE_SIZE + 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS 0
+#define MVPP2_PRS_SRAM_RI_WORD 0
+#define MVPP2_PRS_SRAM_RI_BITS 32
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
+#define MVPP2_PRS_SRAM_SHIFT_BITS 8
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_UDF_OFFS 73
+#define MVPP2_PRS_SRAM_UDF_BITS 8
+#define MVPP2_PRS_SRAM_UDF_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_BITS 2
+#define MVPP2_PRS_SRAM_OP_SEL_BITS 5
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
+#define MVPP2_PRS_SRAM_AI_OFFS 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
+#define MVPP2_PRS_SRAM_AI_MASK 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
+#define MVPP2_PRS_RI_DSA_MASK 0x2
+#define MVPP2_PRS_RI_VLAN_OFFS 2
+#define MVPP2_PRS_RI_VLAN_MASK 0xc
+#define MVPP2_PRS_RI_VLAN_NONE 0x0
+#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_OFFS 9
+#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
+#define MVPP2_PRS_RI_L2_UCAST 0x0
+#define MVPP2_PRS_RI_L2_MCAST BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK 0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
+#define MVPP2_PRS_RI_L3_UN 0x0
+#define MVPP2_PRS_RI_L3_IP4 BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6 BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
+#define MVPP2_PRS_RI_L3_UCAST 0x0
+#define MVPP2_PRS_RI_L3_MCAST BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
+#define MVPP2_PRS_RI_IP_FRAG_FALSE 0x0
+#define MVPP2_PRS_RI_UDF3_MASK 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
+#define MVPP2_PRS_RI_L4_TCP BIT(22)
+#define MVPP2_PRS_RI_L4_UDP BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI 0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
+
+#define MVPP2_PRS_SRAM_SHIFT_MASK ((1 << \
+ MVPP2_PRS_SRAM_SHIFT_BITS) - 1)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED true
+#define MVPP2_PRS_UNTAGGED false
+#define MVPP2_PRS_EDSA true
+#define MVPP2_PRS_DSA false
+
+/* lkpid table structure */
+#define MVPP2_FLOWID_RXQ 0
+#define MVPP2_FLOWID_RXQ_BITS 8
+#define MVPP2_FLOWID_RXQ_MASK (((1 << \
+ MVPP2_FLOWID_RXQ_BITS) - 1) << MVPP2_FLOWID_RXQ)
+
+#define MVPP2_FLOWID_MODE 8
+#define MVPP2_FLOWID_MODE_BITS 8
+#define MVPP2_FLOWID_MODE_MASK (((1 << \
+ MVPP2_FLOWID_MODE_BITS) - 1) << MVPP2_FLOWID_MODE)
+#define MVPP2_FLOWID_MODE_MAX ((1 << MVPP2_FLOWID_MODE_BITS) - 1)
+
+#define MVPP2_FLOWID_FLOW 16
+#define MVPP2_FLOWID_FLOW_BITS 9
+#define MVPP2_FLOWID_FLOW_MASK (((1 << \
+ MVPP2_FLOWID_FLOW_BITS) - 1) << MVPP2_FLOWID_FLOW)
+
+#define MVPP2_FLOWID_EN 25 /*one bit */
+#define MVPP2_FLOWID_EN_MASK BIT(MVPP2_FLOWID_EN)
+
+/* flow table structure */
+#define MVPP2_FLOW_TBL_SIZE 512
+/*------------------------- DWORD 0 --------------------------------- */
+#define MVPP2_FLOW_LAST 0
+#define MVPP2_FLOW_LAST_MASK 1 /*one bit*/
+
+#define MVPP2_FLOW_ENGINE 1
+#define MVPP2_FLOW_ENGINE_BITS 3
+#define MVPP2_FLOW_ENGINE_MASK (((1 << \
+ MVPP2_FLOW_ENGINE_BITS) - 1) << MVPP2_FLOW_ENGINE)
+#define MVPP2_FLOW_ENGINE_MAX 7 /* valid value 1 - 7 */
+
+#define MVPP2_FLOW_PORT_ID 4
+#define MVPP2_FLOW_PORT_ID_BITS 8
+#define MVPP2_FLOW_PORT_ID_MASK (((1 << \
+ MVPP2_FLOW_PORT_ID_BITS) - 1) << MVPP2_FLOW_PORT_ID)
+#define MVPP2_FLOW_PORT_ID_MAX ((1 << MVPP2_FLOW_PORT_ID_BITS) - 1)
+
+#define MVPP2_FLOW_PORT_TYPE 12
+#define MVPP2_FLOW_PORT_TYPE_BITS 2
+#define MVPP2_FLOW_PORT_TYPE_MASK (((1 << \
+ MVPP2_FLOW_PORT_TYPE_BITS) - 1) << MVPP2_FLOW_PORT_TYPE)
+#define MVPP2_FLOW_PORT_TYPE_MAX 2 /* valid value 0 - 2 */
+
+#define MVPP2_FLOW_PPPOE 14
+#define MVPP2_FLOW_PPPOE_BITS 2
+#define MVPP2_FLOW_PPPOE_MASK (((1 << \
+ MVPP2_FLOW_PPPOE_BITS) - 1) << MVPP2_FLOW_PPPOE)
+#define MVPP2_FLOW_PPPOE_MAX 2 /* valid value 0 - 2 */
+
+#define MVPP2_FLOW_VLAN 16
+#define MVPP2_FLOW_VLAN_BITS 3
+#define MVPP2_FLOW_VLAN_MASK (((1 << \
+ MVPP2_FLOW_VLAN_BITS) - 1) << MVPP2_FLOW_VLAN)
+#define MVPP2_FLOW_VLAN_MAX ((1 << MVPP2_FLOW_VLAN_BITS) - 1)
+
+#define MVPP2_FLOW_MACME 19
+#define MVPP2_FLOW_MACME_BITS 2
+#define MVPP2_FLOW_MACME_MASK (((1 << \
+ MVPP2_FLOW_MACME_BITS) - 1) << MVPP2_FLOW_MACME)
+#define MVPP2_FLOW_MACME_MAX 2 /* valid value 0 - 2 */
+
+#define MVPP2_FLOW_UDF7 21
+#define MVPP2_FLOW_UDF7_BITS 2
+#define MVPP2_FLOW_UDF7_MASK (((1 << \
+ MVPP2_FLOW_UDF7_BITS) - 1) << MVPP2_FLOW_UDF7)
+#define MVPP2_FLOW_UDF7_MAX ((1 << MVPP2_FLOW_UDF7_BITS) - 1)
+
+#define MVPP2_FLOW_PORT_ID_SEL 23
+#define MVPP2_FLOW_PORT_ID_SEL_MASK BIT(MVPP2_FLOW_PORT_ID_SEL)
+
+/*----------------------- DWORD 1 ------------------------------------ */
+
+#define MVPP2_FLOW_FIELDS_NUM 0
+#define MVPP2_FLOW_FIELDS_NUM_BITS 3
+#define MVPP2_FLOW_FIELDS_NUM_MASK (((1 << \
+ MVPP2_FLOW_FIELDS_NUM_BITS) - 1) << MVPP2_FLOW_FIELDS_NUM)
+#define MVPP2_FLOW_FIELDS_NUM_MAX 4 /* valid value 0 - 4 */
+
+#define MVPP2_FLOW_LKP_TYPE 3
+#define MVPP2_FLOW_LKP_TYPE_BITS 6
+#define MVPP2_FLOW_LKP_TYPE_MASK (((1 << \
+ MVPP2_FLOW_LKP_TYPE_BITS) - 1) << MVPP2_FLOW_LKP_TYPE)
+#define MVPP2_FLOW_LKP_TYPE_MAX ((1 << MVPP2_FLOW_LKP_TYPE_BITS) - 1)
+
+#define MVPP2_FLOW_FIELD_PRIO 9
+#define MVPP2_FLOW_FIELD_PRIO_BITS 6
+#define MVPP2_FLOW_FIELD_PRIO_MASK (((1 << \
+ MVPP2_FLOW_FIELD_PRIO_BITS) - 1) << MVPP2_FLOW_FIELD_PRIO)
+#define MVPP2_FLOW_FIELD_PRIO_MAX ((1 << MVPP2_FLOW_FIELD_PRIO_BITS) - 1)
+
+#define MVPP2_FLOW_SEQ_CTRL 15
+#define MVPP2_FLOW_SEQ_CTRL_BITS 3
+#define MVPP2_FLOW_SEQ_CTRL_MASK (((1 << \
+ MVPP2_FLOW_SEQ_CTRL_BITS) - 1) << MVPP2_FLOW_SEQ_CTRL)
+#define MVPP2_FLOW_SEQ_CTRL_MAX 4
+
+/*------------------------- DWORD 2 ---------------------------------- */
+#define MVPP2_FLOW_FIELD0_ID 0
+#define MVPP2_FLOW_FIELD1_ID 6
+#define MVPP2_FLOW_FIELD2_ID 12
+#define MVPP2_FLOW_FIELD3_ID 18
+
+#define MVPP2_FLOW_FIELD_ID_BITS 6
+#define MVPP2_FLOW_FIELD_ID(num) (MVPP2_FLOW_FIELD0_ID + \
+ (MVPP2_FLOW_FIELD_ID_BITS * (num)))
+#define MVPP2_FLOW_FIELD_MASK(num) (((1 << \
+ MVPP2_FLOW_FIELD_ID_BITS) - 1) << (MVPP2_FLOW_FIELD_ID_BITS * (num)))
+#define MVPP2_FLOW_FIELD_MAX ((1 << MVPP2_FLOW_FIELD_ID_BITS) - 1)
+
+/* lookup id attribute define */
+#define MVPP2_PRS_FL_ATTR_VLAN_BIT BIT(0)
+#define MVPP2_PRS_FL_ATTR_IP4_BIT BIT(1)
+#define MVPP2_PRS_FL_ATTR_IP6_BIT BIT(2)
+#define MVPP2_PRS_FL_ATTR_ARP_BIT BIT(3)
+#define MVPP2_PRS_FL_ATTR_FRAG_BIT BIT(4)
+#define MVPP2_PRS_FL_ATTR_TCP_BIT BIT(5)
+#define MVPP2_PRS_FL_ATTR_UDP_BIT BIT(6)
+
+/* PP22 RSS Registers */
+#define MVPP22_RSS_IDX_REG 0x1500
+#define MVPP22_RSS_IDX_ENTRY_NUM_OFF 0
+#define MVPP22_RSS_IDX_ENTRY_NUM_MASK 0x1F
+#define MVPP22_RSS_IDX_TBL_NUM_OFF 8
+#define MVPP22_RSS_IDX_TBL_NUM_MASK 0x700
+#define MVPP22_RSS_IDX_RXQ_NUM_OFF 16
+#define MVPP22_RSS_IDX_RXQ_NUM_MASK 0xFF0000
+#define MVPP22_RSS_RXQ2RSS_TBL_REG 0x1504
+#define MVPP22_RSS_RXQ2RSS_TBL_POINT_OFF 0
+#define MVPP22_RSS_RXQ2RSS_TBL_POINT_MASK 0x7
+#define MVPP22_RSS_TBL_ENTRY_REG 0x1508
+#define MVPP22_RSS_TBL_ENTRY_OFF 0
+#define MVPP22_RSS_TBL_ENTRY_MASK 0xFF
+#define MVPP22_RSS_WIDTH_REG 0x150c
+#define MVPP22_RSS_WIDTH_OFF 0
+#define MVPP22_RSS_WIDTH_MASK 0xF
+#define MVPP22_RSS_HASH_SEL_REG 0x1510
+#define MVPP22_RSS_HASH_SEL_OFF 0
+#define MVPP22_RSS_HASH_SEL_MASK 0x1
+/* RSS constants */
+#define MVPP22_RSS_TBL_NUM 8
+#define MVPP22_RSS_TBL_LINE_NUM 32
+#define MVPP22_RSS_WIDTH_MAX 8
+
+/* MAC entries, shadow udf */
+enum mv_pp2x_prs_udf {
+ MVPP2_PRS_UDF_MAC_DEF,
+ MVPP2_PRS_UDF_MAC_RANGE,
+ MVPP2_PRS_UDF_L2_DEF,
+ MVPP2_PRS_UDF_L2_DEF_COPY,
+ MVPP2_PRS_UDF_L2_USER,
+};
+
+/* L2 cast in parser result info */
+enum mv_pp2x_l2_cast {
+ MVPP2_PRS_MAC_UC,
+ MVPP2_PRS_MAC_MC,
+ MVPP2_PRS_MAC_BC,
+};
+
+/* Lookup ID */
+enum mv_pp2x_prs_lookup {
+ MVPP2_PRS_LU_MH,
+ MVPP2_PRS_LU_MAC,
+ MVPP2_PRS_LU_DSA,
+ MVPP2_PRS_LU_VLAN,
+ MVPP2_PRS_LU_L2,
+ MVPP2_PRS_LU_PPPOE,
+ MVPP2_PRS_LU_IP4,
+ MVPP2_PRS_LU_IP6,
+ MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_LAST,
+};
+
+/* L3 cast enum */
+enum mv_pp2x_prs_l3_cast {
+ MVPP2_PRS_L3_UNI_CAST,
+ MVPP2_PRS_L3_MULTI_CAST,
+ MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* Packet flow ID */
+enum mv_pp2x_prs_flow {
+ MVPP2_PRS_FL_START = 8,
+ MVPP2_PRS_FL_IP4_TCP_NF_UNTAG = MVPP2_PRS_FL_START,
+ MVPP2_PRS_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_PRS_FL_IP4_TCP_NF_TAG,
+ MVPP2_PRS_FL_IP4_UDP_NF_TAG,
+ MVPP2_PRS_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_PRS_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_PRS_FL_IP6_TCP_NF_TAG,
+ MVPP2_PRS_FL_IP6_UDP_NF_TAG,
+ MVPP2_PRS_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_PRS_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_PRS_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_PRS_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_PRS_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_PRS_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_PRS_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_PRS_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_PRS_FL_IP4_UNTAG, /* non-TCP, non-UDP, same for below */
+ MVPP2_PRS_FL_IP4_TAG,
+ MVPP2_PRS_FL_IP6_UNTAG,
+ MVPP2_PRS_FL_IP6_TAG,
+ MVPP2_PRS_FL_NON_IP_UNTAG,
+ MVPP2_PRS_FL_NON_IP_TAG,
+ MVPP2_PRS_FL_LAST,
+ MVPP2_PRS_FL_TCAM_NUM = 52, /* The parser TCAM lines needed to
+ *generate flow ID
+ */
+};
+
+enum mv_pp2x_cls_engine_num {
+ MVPP2_CLS_ENGINE_C2 = 1,
+ MVPP2_CLS_ENGINE_C3A,
+ MVPP2_CLS_ENGINE_C3B,
+ MVPP2_CLS_ENGINE_C4,
+ MVPP2_CLS_ENGINE_C3HA = 6,
+ MVPP2_CLS_ENGINE_C3HB,
+};
+
+enum mv_pp2x_cls_lkp_type {
+ MVPP2_CLS_LKP_HASH = 0,
+ MVPP2_CLS_LKP_VLAN_PRI,
+ MVPP2_CLS_LKP_DSCP_PRI,
+ MVPP2_CLS_LKP_DEFAULT,
+ MVPP2_CLS_LKP_MAX,
+};
+
+enum mv_pp2x_cls_fl_pri {
+ MVPP2_CLS_FL_COS_PRI = 0,
+ MVPP2_CLS_FL_RSS_PRI,
+};
+
+/* Classifier flow-table field IDs (IP/L4 address and port selectors).
+ * NOTE(review): the tag name spells "filed" (sic, should be "field");
+ * it is kept as-is because callers elsewhere reference this name.
+ */
+enum mv_pp2x_cls_filed_id {
+ MVPP2_CLS_FIELD_IP4SA = 0x10,
+ MVPP2_CLS_FIELD_IP4DA = 0x11,
+ MVPP2_CLS_FIELD_IP6SA = 0x17,
+ MVPP2_CLS_FIELD_IP6DA = 0x18,
+ MVPP2_CLS_FIELD_L4SIP = 0x1D,
+ MVPP2_CLS_FIELD_L4DIP = 0x1E,
+};
+
+enum mv_pp2x_cos_type {
+ MVPP2_COS_TYPE_DEF = 0,
+ MVPP2_COS_TYPE_VLAN,
+ MVPP2_COS_TYPE_DSCP,
+};
+
+enum mv_pp2x_rss_hash_mode {
+ MVPP2_RSS_HASH_2T = 0,
+ MVPP2_RSS_HASH_5T,
+};
+
+enum mv_pp2x_mac_del_option {
+ MVPP2_DEL_MAC_ALL = 0,
+ MVPP2_DEL_MAC_NOT_IN_LIST,
+};
+
+struct mv_pp2x_prs_result_info {
+ u32 ri;
+ u32 ri_mask;
+};
+
+struct mv_pp2x_prs_flow_id {
+ u32 flow_id;
+ struct mv_pp2x_prs_result_info prs_result;
+};
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE 512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_FLOWS_TBL_FIELDS_MAX 4
+
+#define MVPP2_CLS_LKP_TBL_SIZE 64
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS 8
+#define MVPP2_BM_COOKIE_CPU_OFFS 24
+
+/* BM short pool packet size.
+ * These values ensure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
+#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
+#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
+
+#define MVPP2_BM_SHORT_FRAME_SIZE 1024
+#define MVPP2_BM_LONG_FRAME_SIZE 2048
+#define MVPP2_BM_JUMBO_FRAME_SIZE 10240
+
+enum mv_pp2x_bm_pool_log_num {
+ MVPP2_BM_SWF_SHORT_POOL,
+ MVPP2_BM_SWF_LONG_POOL,
+ MVPP2_BM_SWF_JUMBO_POOL,
+ MVPP2_BM_SWF_NUM_POOLS
+};
+
+/* The mv_pp2x_tx_desc and mv_pp2x_rx_desc structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design
+ */
+
+#define MVPP2_TXD_L3_OFF_SHIFT 0
+#define MVPP2_TXD_IP_HLEN_SHIFT 8
+#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE BIT(23)
+#define MVPP2_TXD_L4_UDP BIT(24)
+#define MVPP2_TXD_L3_IP6 BIT(26)
+#define MVPP2_TXD_L_DESC BIT(28)
+#define MVPP2_TXD_F_DESC BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_OVERRUN BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS 16
+#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
+#define MVPP2_RXD_L4_TCP BIT(25)
+#define MVPP2_RXD_L4_UDP BIT(26)
+#define MVPP2_RXD_L3_IP4 BIT(28)
+#define MVPP2_RXD_L3_IP6 BIT(30)
+#define MVPP2_RXD_BUF_HDR BIT(31)
+/* Sub fields of "parserInfo" field */
+#define MVPP2_RXD_LKP_ID_OFFS 0
+#define MVPP2_RXD_LKP_ID_BITS 6
+#define MVPP2_RXD_LKP_ID_MASK (((1 << \
+ MVPP2_RXD_LKP_ID_BITS) - 1) << MVPP2_RXD_LKP_ID_OFFS)
+#define MVPP2_RXD_CPU_CODE_OFFS 6
+#define MVPP2_RXD_CPU_CODE_BITS 3
+#define MVPP2_RXD_CPU_CODE_MASK (((1 << \
+ MVPP2_RXD_CPU_CODE_BITS) - 1) << MVPP2_RXD_CPU_CODE_OFFS)
+#define MVPP2_RXD_PPPOE_BIT 9
+#define MVPP2_RXD_PPPOE_MASK BIT(MVPP2_RXD_PPPOE_BIT)
+#define MVPP2_RXD_L3_CAST_OFFS 10
+#define MVPP2_RXD_L3_CAST_BITS 2
+#define MVPP2_RXD_L3_CAST_MASK (((1 << \
+ MVPP2_RXD_L3_CAST_BITS) - 1) << MVPP2_RXD_L3_CAST_OFFS)
+#define MVPP2_RXD_L2_CAST_OFFS 12
+#define MVPP2_RXD_L2_CAST_BITS 2
+#define MVPP2_RXD_L2_CAST_MASK (((1 << \
+ MVPP2_RXD_L2_CAST_BITS) - 1) << MVPP2_RXD_L2_CAST_OFFS)
+#define MVPP2_RXD_VLAN_INFO_OFFS 14
+#define MVPP2_RXD_VLAN_INFO_BITS 2
+#define MVPP2_RXD_VLAN_INFO_MASK (((1 << \
+ MVPP2_RXD_VLAN_INFO_BITS) - 1) << MVPP2_RXD_VLAN_INFO_OFFS)
+/* Bits of "bmQset" field */
+#define MVPP2_RXD_BUFF_QSET_NUM_OFFS 0
+#define MVPP2_RXD_BUFF_QSET_NUM_MASK (0x7f << MVPP2_RXD_BUFF_QSET_NUM_OFFS)
+#define MVPP2_RXD_BUFF_TYPE_OFFS 7
+#define MVPP2_RXD_BUFF_TYPE_MASK (0x1 << MVPP2_RXD_BUFF_TYPE_OFFS)
+/* Bits of "status" field */
+#define MVPP2_RXD_L3_OFFSET_OFFS 0
+#define MVPP2_RXD_L3_OFFSET_MASK (0x7F << MVPP2_RXD_L3_OFFSET_OFFS)
+#define MVPP2_RXD_IP_HLEN_OFFS 8
+#define MVPP2_RXD_IP_HLEN_MASK (0x1F << MVPP2_RXD_IP_HLEN_OFFS)
+#define MVPP2_RXD_ES_BIT 15
+#define MVPP2_RXD_ES_MASK BIT(MVPP2_RXD_ES_BIT)
+#define MVPP2_RXD_HWF_SYNC_BIT 21
+#define MVPP2_RXD_HWF_SYNC_MASK BIT(MVPP2_RXD_HWF_SYNC_BIT)
+#define MVPP2_RXD_L4_CHK_OK_BIT 22
+#define MVPP2_RXD_L4_CHK_OK_MASK BIT(MVPP2_RXD_L4_CHK_OK_BIT)
+#define MVPP2_RXD_IP_FRAG_BIT 23
+#define MVPP2_RXD_IP_FRAG_MASK BIT(MVPP2_RXD_IP_FRAG_BIT)
+#define MVPP2_RXD_IP4_HEADER_ERR_BIT 24
+#define MVPP2_RXD_IP4_HEADER_ERR_MASK BIT(MVPP2_RXD_IP4_HEADER_ERR_BIT)
+#define MVPP2_RXD_L4_OFFS 25
+#define MVPP2_RXD_L4_MASK (7 << MVPP2_RXD_L4_OFFS)
+/* Value 0 - N/A, 3-7 - User Defined */
+#define MVPP2_RXD_L3_OFFS 28
+#define MVPP2_RXD_L3_MASK (7 << MVPP2_RXD_L3_OFFS)
+/* Value 0 - N/A, 6-7 - User Defined */
+#define MVPP2_RXD_L3_IP4_OPT (2 << MVPP2_RXD_L3_OFFS)
+#define MVPP2_RXD_L3_IP4_OTHER (3 << MVPP2_RXD_L3_OFFS)
+#define MVPP2_RXD_L3_IP6_EXT (5 << MVPP2_RXD_L3_OFFS)
+#define MVPP2_RXD_BUF_HDR_BIT 31
+#define MVPP2_RXD_BUF_HDR_MASK BIT(MVPP2_RXD_BUF_HDR_BIT)
+/* status field MACROs */
+#define MVPP2_RXD_L3_IS_IP4(status) (((status) & \
+ MVPP2_RXD_L3_MASK) == MVPP2_RXD_L3_IP4)
+#define MVPP2_RXD_L3_IS_IP4_OPT(status) (((status) & \
+ MVPP2_RXD_L3_MASK) == MVPP2_RXD_L3_IP4_OPT)
+#define MVPP2_RXD_L3_IS_IP4_OTHER(status) (((status) & \
+ MVPP2_RXD_L3_MASK) == MVPP2_RXD_L3_IP4_OTHER)
+#define MVPP2_RXD_L3_IS_IP6(status) (((status) & \
+ MVPP2_RXD_L3_MASK) == MVPP2_RXD_L3_IP6)
+#define MVPP2_RXD_L3_IS_IP6_EXT(status) (((status) & \
+ MVPP2_RXD_L3_MASK) == MVPP2_RXD_L3_IP6_EXT)
+#define MVPP2_RXD_L4_IS_UDP(status) (((status) & \
+ MVPP2_RXD_L4_MASK) == MVPP2_RXD_L4_UDP)
+#define MVPP2_RXD_L4_IS_TCP(status) (((status) & \
+ MVPP2_RXD_L4_MASK) == MVPP2_RXD_L4_TCP)
+#define MVPP2_RXD_IP4_HDR_ERR(status) ((status) & \
+ MVPP2_RXD_IP4_HEADER_ERR_MASK)
+#define MVPP2_RXD_IP4_FRG(status) ((status) & \
+ MVPP2_RXD_IP_FRAG_MASK)
+#define MVPP2_RXD_L4_CHK_OK(status) ((status) & \
+ MVPP2_RXD_L4_CHK_OK_MASK)
+
+struct pp21_specific_tx_desc {
+ u32 buf_phys_addr; /* physical addr of transmitted buffer */
+ u32 buf_cookie; /* cookie for access to TX buffer in tx path */
+ u32 rsrvd_hw_cmd[3]; /* hw_cmd (for future use, BM, PON, PNC) */
+ u32 rsrvd1; /* reserved (for future use) */
+};
+
+/* PPv22-specific tail of the TX descriptor (64-bit packed fields) */
+struct pp22_specific_tx_desc {
+ u64 rsrvd_hw_cmd1; /* hw_cmd (BM, PON, PNC) */
+ u64 buf_phys_addr_hw_cmd2; /* buffer physical address + hw_cmd bits */
+ u64 buf_cookie_bm_qset_hw_cmd3;
+ /* cookie for access to TX buffer in tx path */
+ /* bm_qset (for future use, BM) */
+ /* classify_info (for future use, PnC) */
+};
+
+union pp2x_specific_tx_desc {
+ struct pp21_specific_tx_desc pp21;
+ struct pp22_specific_tx_desc pp22;
+};
+
+struct mv_pp2x_tx_desc {
+ u32 command; /* Options used by HW for packet xmitting */
+ u8 packet_offset; /* the offset from the buffer beginning */
+ u8 phys_txq; /* destination queue ID */
+ u16 data_size; /* data size of transmitted packet in bytes */
+ union pp2x_specific_tx_desc u;
+};
+
+struct pp21_specific_rx_desc {
+ u32 buf_phys_addr; /* physical address of the buffer */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+ u16 rsrvd_gem; /* gem_port_id (for future use, PON) */
+ u16 rsrvd_l4csum; /* csum_l4 (for future use, PnC) */
+ u8 rsrvd_bm_qset; /* bm_qset (for future use, BM) */
+ u8 rsrvd1;
+ u16 rsrvd_cls_info; /* classify_info (for future use, PnC) */
+ u32 rsrvd_flow_id; /* flow_id (for future use, PnC) */
+ u32 rsrvd_abs;
+};
+
+struct pp22_specific_rx_desc {
+ u16 rsrvd_gem; /* gem_port_id (for future use, PON) */
+ u16 rsrvd_l4csum; /* csum_l4 (for future use, PnC) */
+ u32 rsrvd_timestamp;
+ u64 buf_phys_addr_key_hash;
+ u64 buf_cookie_bm_qset_cls_info;
+ /* cookie for access to RX buffer in rx path */
+ /* bm_qset (for future use, BM) */
+ /* classify_info (for future use, PnC) */
+};
+
+union pp2x_specific_rx_desc {
+ struct pp21_specific_rx_desc pp21;
+ struct pp22_specific_rx_desc pp22;
+};
+
+struct mv_pp2x_rx_desc {
+ u32 status; /* info about received packet */
+ u16 rsrvd_parser; /* parser_info (for future use, PnC) */
+ u16 data_size; /* size of received packet in bytes */
+ union pp2x_specific_rx_desc u;
+};
+
+union mv_pp2x_prs_tcam_entry {
+ u32 word[MVPP2_PRS_TCAM_WORDS];
+ u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+union mv_pp2x_prs_sram_entry {
+ u32 word[MVPP2_PRS_SRAM_WORDS];
+ u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+struct mv_pp2x_prs_entry {
+ u32 index;
+ union mv_pp2x_prs_tcam_entry tcam;
+ union mv_pp2x_prs_sram_entry sram;
+};
+
+struct mv_pp2x_prs_shadow {
+ bool valid;
+ bool finish;
+
+ /* Lookup ID */
+ int lu;
+
+ /* User defined offset */
+ int udf;
+
+ /* Result info */
+ u32 ri;
+ u32 ri_mask;
+};
+
+struct mv_pp2x_cls_flow_entry {
+ u32 index;
+ u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+struct mv_pp2x_cls_lookup_entry {
+ u32 lkpid;
+ u32 way;
+ u32 data;
+};
+
+struct mv_pp2x_cls_flow_info {
+ u32 lkpid;
+ /* The flow table entry index of CoS default rule */
+ u32 flow_entry_dflt;
+ /* The flow table entry index of CoS VLAN rule */
+ u32 flow_entry_vlan;
+ /* The flow table entry index of CoS DSCP rule */
+ u32 flow_entry_dscp;
+ /* The flow table entry index of RSS rule */
+ u32 flow_entry_rss1;
+ /* The flow table entry index of RSS rule for UDP packet to
+ * update hash mode
+ */
+ u32 flow_entry_rss2;
+};
+
+/* The flow entry could become lkp pointer in lookup table */
+enum mv_pp2x_cls_lkp_ptr_candidate {
+ MVPP2_LKP_PTR_FLOW_DEFAULT,
+ MVPP2_LKP_PTR_FLOW_VLAN,
+ MVPP2_LKP_PTR_FLOW_DSCP,
+ MVPP2_LKP_PTR_NUM
+};
+
+struct mv_pp2x_cls_shadow {
+ struct mv_pp2x_cls_flow_info *flow_info;
+ u32 flow_free_start; /* The start of free entry index in flow table */
+ /* TODO: does need a spin_lock for flow_free_start? */
+};
+
+/* Classifier engine2 and QoS structure */
+
+/* C2 constants */
+#define MVPP2_CLS_C2_TCAM_SIZE 256
+#define MVPP2_CLS_C2_TCAM_WORDS 5
+#define MVPP2_CLS_C2_TCAM_DATA_BYTES 10
+#define MVPP2_CLS_C2_SRAM_WORDS 5
+#define MVPP2_CLS_C2_HEK_LKP_TYPE_OFFS 0
+#define MVPP2_CLS_C2_HEK_LKP_TYPE_BITS 6
+#define MVPP2_CLS_C2_HEK_LKP_TYPE_MASK (0x3F << \
+ MVPP2_CLS_C2_HEK_LKP_TYPE_OFFS)
+#define MVPP2_CLS_C2_HEK_PORT_TYPE_OFFS 6
+#define MVPP2_CLS_C2_HEK_PORT_TYPE_BITS 2
+#define MVPP2_CLS_C2_HEK_PORT_TYPE_MASK (0x3 << \
+ MVPP2_CLS_C2_HEK_PORT_TYPE_OFFS)
+#define MVPP2_CLS_C2_QOS_DSCP_TBL_SIZE 64
+#define MVPP2_CLS_C2_QOS_PRIO_TBL_SIZE 8
+#define MVPP2_CLS_C2_QOS_DSCP_TBL_NUM 8
+#define MVPP2_CLS_C2_QOS_PRIO_TBL_NUM 64
+
+/* Classifier engine C2 entry: TCAM match data plus the SRAM action
+ * words, with the SRAM also addressable by register layout (offsets
+ * noted per field).
+ */
+struct mv_pp2x_cls_c2_entry {
+ u32 index;
+ bool inv; /* NOTE(review): presumably the entry-invalid flag — confirm against C2 TCAM users */
+ union {
+ u32 words[MVPP2_CLS_C2_TCAM_WORDS];
+ u8 bytes[MVPP2_CLS_C2_TCAM_WORDS * 4];
+ } tcam;
+ union {
+ u32 words[MVPP2_CLS_C2_SRAM_WORDS];
+ struct {
+ u32 action_tbl; /* 0x1B30 */
+ u32 actions; /* 0x1B60 */
+ u32 qos_attr; /* 0x1B64 */
+ u32 hwf_attr; /* 0x1B68 */
+ u32 rss_attr; /* 0x1B6C */
+ u32 seq_attr; /* 0x1B70 */
+ } regs;
+ } sram;
+};
+
+enum mv_pp2x_cls2_hek_offs {
+ MVPP2_CLS_C2_HEK_OFF_BYTE0 = 0,
+ MVPP2_CLS_C2_HEK_OFF_BYTE1,
+ MVPP2_CLS_C2_HEK_OFF_BYTE2,
+ MVPP2_CLS_C2_HEK_OFF_BYTE3,
+ MVPP2_CLS_C2_HEK_OFF_BYTE4,
+ MVPP2_CLS_C2_HEK_OFF_BYTE5,
+ MVPP2_CLS_C2_HEK_OFF_BYTE6,
+ MVPP2_CLS_C2_HEK_OFF_BYTE7,
+ MVPP2_CLS_C2_HEK_OFF_LKP_PORT_TYPE,
+ MVPP2_CLS_C2_HEK_OFF_PORT_ID,
+ MVPP2_CLS_C2_HEK_OFF_MAX
+};
+
+struct mv_pp2x_cls_c2_qos_entry {
+ u32 tbl_id;
+ u32 tbl_sel;
+ u32 tbl_line;
+ u32 data;
+};
+
+enum mv_pp2x_src_port_type {
+ MVPP2_SRC_PORT_TYPE_PHY,
+ MVPP2_SRC_PORT_TYPE_UNI,
+ MVPP2_SRC_PORT_TYPE_VIR
+};
+
+struct mv_pp2x_src_port {
+ enum mv_pp2x_src_port_type port_type;
+ u32 port_value;
+ u32 port_mask;
+};
+
+enum mv_pp2x_qos_tbl_sel {
+ MVPP2_QOS_TBL_SEL_PRI = 0,
+ MVPP2_QOS_TBL_SEL_DSCP,
+};
+
+enum mv_pp2x_qos_src_tbl {
+ MVPP2_QOS_SRC_ACTION_TBL = 0,
+ MVPP2_QOS_SRC_DSCP_PBIT_TBL,
+};
+
+struct mv_pp2x_engine_qos_info {
+ /* dscp pri table or none */
+ enum mv_pp2x_qos_tbl_sel qos_tbl_type;
+ /* dscp or pri table index */
+ u32 qos_tbl_index;
+ /* policer id, 0xffff do not assign policer */
+ u16 policer_id;
+ /* pri/dscp comes from qos or act tbl */
+ enum mv_pp2x_qos_src_tbl pri_dscp_src;
+ /* gemport comes from qos or act tbl */
+ enum mv_pp2x_qos_src_tbl gemport_src;
+ enum mv_pp2x_qos_src_tbl q_low_src;
+ enum mv_pp2x_qos_src_tbl q_high_src;
+ enum mv_pp2x_qos_src_tbl color_src;
+};
+
+enum mv_pp2x_color_action_type {
+ /* Do not update color */
+ MVPP2_COLOR_ACTION_TYPE_NO_UPDT = 0,
+ /* Do not update color and lock */
+ MVPP2_COLOR_ACTION_TYPE_NO_UPDT_LOCK,
+ /* Update to green */
+ MVPP2_COLOR_ACTION_TYPE_GREEN,
+ /* Update to green and lock */
+ MVPP2_COLOR_ACTION_TYPE_GREEN_LOCK,
+ /* Update to yellow */
+ MVPP2_COLOR_ACTION_TYPE_YELLOW,
+ /* Update to yellow */
+ MVPP2_COLOR_ACTION_TYPE_YELLOW_LOCK,
+ /* Update to red */
+ MVPP2_COLOR_ACTION_TYPE_RED,
+ /* Update to red and lock */
+ MVPP2_COLOR_ACTION_TYPE_RED_LOCK,
+};
+
+enum mv_pp2x_general_action_type {
+ /* The field will be not updated */
+ MVPP2_ACTION_TYPE_NO_UPDT,
+ /* The field will be not updated and lock */
+ MVPP2_ACTION_TYPE_NO_UPDT_LOCK,
+ /* The field will be updated */
+ MVPP2_ACTION_TYPE_UPDT,
+ /* The field will be updated and lock */
+ MVPP2_ACTION_TYPE_UPDT_LOCK,
+};
+
+enum mv_pp2x_flowid_action_type {
+ /* FlowID is disable */
+ MVPP2_ACTION_FLOWID_DISABLE = 0,
+ /* FlowID is enable */
+ MVPP2_ACTION_FLOWID_ENABLE,
+};
+
+enum mv_pp2x_frwd_action_type {
+ /* The decision will be not updated */
+ MVPP2_FRWD_ACTION_TYPE_NO_UPDT,
+ /* The decision is not updated, and following no change to it */
+ MVPP2_FRWD_ACTION_TYPE_NO_UPDT_LOCK,
+ /* The packet to CPU (Software Forwarding) */
+ MVPP2_FRWD_ACTION_TYPE_SWF,
+ /* The packet to CPU, and following no change to it */
+ MVPP2_FRWD_ACTION_TYPE_SWF_LOCK,
+ /* The packet to one transmit port (Hardware Forwarding) */
+ MVPP2_FRWD_ACTION_TYPE_HWF,
+ /* The packet to one tx port, and following no change to it */
+ MVPP2_FRWD_ACTION_TYPE_HWF_LOCK,
+ /* The pkt to one tx port, and maybe internal packets is used */
+ MVPP2_FRWD_ACTION_TYPE_HWF_LOW_LATENCY,
+ /* Same to above, but following no change to it*/
+ MVPP2_FRWD_ACTION_TYPE_HWF_LOW_LATENCY_LOCK,
+};
+
+struct mv_pp2x_engine_pkt_action {
+ enum mv_pp2x_color_action_type color_act;
+ enum mv_pp2x_general_action_type pri_act;
+ enum mv_pp2x_general_action_type dscp_act;
+ enum mv_pp2x_general_action_type gemp_act;
+ enum mv_pp2x_general_action_type q_low_act;
+ enum mv_pp2x_general_action_type q_high_act;
+ enum mv_pp2x_general_action_type rss_act;
+ enum mv_pp2x_flowid_action_type flowid_act;
+ enum mv_pp2x_frwd_action_type frwd_act;
+};
+
+struct mv_pp2x_qos_value {
+ u16 pri;
+ u16 dscp;
+ u16 gemp;
+ u16 q_low;
+ u16 q_high;
+};
+
+struct mv_pp2x_engine_pkt_mod {
+ u32 mod_cmd_idx;
+ u32 mod_data_idx;
+ u32 l4_chksum_update_flag;
+};
+
+struct mv_pp2x_duplicate_info {
+ /* pkt duplication flow id */
+ u32 flow_id;
+ /* pkt duplication count */
+ u32 flow_cnt;
+};
+
+/* The logic C2 entry, easy to understand and use */
+struct mv_pp2x_c2_add_entry {
+ struct mv_pp2x_src_port port;
+ u8 lkp_type;
+ u8 lkp_type_mask;
+ /* priority in this look_type */
+ u32 priority;
+ /* all the qos input */
+ struct mv_pp2x_engine_qos_info qos_info;
+ /* update&lock info */
+ struct mv_pp2x_engine_pkt_action action;
+ /* pri/dscp/gemport/qLow/qHigh */
+ struct mv_pp2x_qos_value qos_value;
+ /* PMT cmd_idx and data_idx */
+ struct mv_pp2x_engine_pkt_mod pkt_mod;
+ /* RSS enable or disable */
+ int rss_en;
+ /* pkt duplication flow info */
+ struct mv_pp2x_duplicate_info flow_info;
+};
+
+struct mv_pp2x_c2_rule_idx {
+ /* The TCAM rule index for VLAN pri check with QoS pbit table */
+ u32 vlan_pri_idx;
+ /* The TCAM rule index for DSCP check with QoS dscp table */
+ u32 dscp_pri_idx;
+ /* The default rule for flow untagged and non-IP */
+ u32 default_rule_idx;
+};
+
+struct mv_pp2x_c2_shadow {
+ int c2_tcam_free_start;
+ /* Per src port */
+ struct mv_pp2x_c2_rule_idx rule_idx_info[8];
+};
+
+struct mv_pp2x_bm_pool {
+ /* Pool number in the range 0-7 */
+ int id;
+
+ /*Logical id, equals to index in parent priv */
+ enum mv_pp2x_bm_pool_log_num log_id;
+
+ /* Buffer Pointers Pool External (BPPE) size */
+ int size;
+ /* Number of buffers for this pool */
+ int buf_num;
+ /* Pool buffer size */
+ int buf_size;
+ /* Packet size */
+ int pkt_size;
+ int frag_size;
+ /* pool for external use (not kernel) */
+ bool external_pool;
+ /* BPPE virtual base address */
+ void *virt_addr;
+ /* BPPE physical base address */
+ dma_addr_t phys_addr;
+
+ /* Ports using BM pool */
+ u32 port_map;
+
+ int in_use_thresh;
+};
+
+/* Buffer header layout; 'info' bits are decoded with the
+ * MVPP2_B_HDR_INFO_* macros below. NOTE(review): presumably written by
+ * HW at the start of chained (multi-buffer) packets — confirm.
+ */
+struct mv_pp2x_buff_hdr {
+ u32 next_buff_phys_addr;
+ u32 next_buff_virt_addr;
+ u16 byte_count;
+ u16 info;
+ u8 reserved1; /* bm_qset (for future use, BM) */
+};
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* Macros */
+#define MVPP2_RX_DESC_POOL(rx_desc) ((rx_desc->status & \
+ MVPP2_RXD_BM_POOL_ID_MASK) >> MVPP2_RXD_BM_POOL_ID_OFFS)
+
+/* RSS-related definitions */
+enum mv_pp22_rss_access_sel {
+ MVPP22_RSS_ACCESS_POINTER,
+ MVPP22_RSS_ACCESS_TBL,
+};
+
+enum mv_pp2_rss_hash_select {
+ MVPP2_RSS_HASH_0_4,
+ MVPP2_RSS_HASH_5_9,
+};
+
+/* Structure describing an RXQ and its corresponding RSS table */
+struct mv_pp22_rss_tbl_ptr {
+ u8 rxq_idx;
+ u8 rss_tbl_ptr;
+};
+
+/* Normal RSS entry */
+struct mv_pp22_rss_tbl_entry {
+ u8 tbl_id;
+ u8 tbl_line;
+ u8 width;
+ u8 rxq;
+};
+
+union mv_pp22_rss_access_entry {
+ struct mv_pp22_rss_tbl_ptr pointer;
+ struct mv_pp22_rss_tbl_entry entry;
+};
+
+struct mv_pp22_rss_entry {
+ enum mv_pp22_rss_access_sel sel;
+ union mv_pp22_rss_access_entry u;
+};
+
+/* C3 and other module definitions */
+#define MVPP2_CLS_C3_HASH_TBL_SIZE (4096)
+#define MVPP2_CLS_C3_MISS_TBL_SIZE (64)
+#define MVPP2_CLS_C3_EXT_HEK_WORDS (9)
+#define MVPP2_CLS_C3_SRAM_WORDS (5)
+#define MVPP2_CLS_C3_EXT_TBL_SIZE (256)
+#define MVPP2_CLS_C3_HEK_WORDS (3)
+#define MVPP2_CLS_C3_HEK_BYTES 12 /* size in bytes */
+#define MVPP2_CLS_C3_BANK_SIZE (512)
+#define MVPP2_CLS_C3_MAX_SEARCH_DEPTH (16)
+
+/* Classifier C3 offsets in hash table */
+#define KEY_OCCUPIED (116)
+#define KEY_FORMAT (115)
+#define KEY_PTR_EXT (107)
+
+#define KEY_PRT_ID(ext_mode) ((ext_mode == 1) ? (99) : (107))
+#define KEY_PRT_ID_MASK(ext_mode) (((1 << KEY_CTRL_PRT_ID_BITS) - 1) << (KEY_PRT_ID(ext_mode) % 32))
+
+#define KEY_PRT_ID_TYPE(ext_mode) ((ext_mode == 1) ? (97) : (105))
+#define KEY_PRT_ID_TYPE_MASK(ext_mode) ((KEY_CTRL_PRT_ID_TYPE_MAX) << (KEY_PRT_ID_TYPE(ext_mode) % 32))
+
+#define KEY_LKP_TYPE(ext_mode) ((ext_mode == 1) ? (91) : (99))
+#define KEY_LKP_TYPE_MASK(ext_mode) (((1 << KEY_CTRL_LKP_TYPE_BITS) - 1) << (KEY_LKP_TYPE(ext_mode) % 32))
+
+#define KEY_L4_INFO(ext_mode) ((ext_mode == 1) ? (88) : (96))
+#define KEY_L4_INFO_MASK(ext_mode) (((1 << KEY_CTRL_L4_BITS) - 1) << (KEY_L4_INFO(ext_mode) % 32))
+
+#define KEY_CTRL_LKP_TYPE 4
+#define KEY_CTRL_LKP_TYPE_BITS 6
+
+#define KEY_CTRL_LKP_TYPE_MAX ((1 << KEY_CTRL_LKP_TYPE_BITS) - 1)
+#define KEY_CTRL_LKP_TYPE_MASK (((1 << KEY_CTRL_LKP_TYPE_BITS) - 1) << KEY_CTRL_LKP_TYPE)
+
+#define KEY_CTRL_PRT_ID_TYPE 12
+#define KEY_CTRL_PRT_ID_TYPE_BITS 2
+#define KEY_CTRL_PRT_ID_TYPE_MAX ((1 << KEY_CTRL_PRT_ID_TYPE_BITS) - 1)
+#define KEY_CTRL_PRT_ID_TYPE_MASK ((KEY_CTRL_PRT_ID_TYPE_MAX) << KEY_CTRL_PRT_ID_TYPE)
+
+#define KEY_CTRL_PRT_ID 16
+#define KEY_CTRL_PRT_ID_BITS 8
+#define KEY_CTRL_PRT_ID_MAX ((1 << KEY_CTRL_PRT_ID_BITS) - 1)
+#define KEY_CTRL_PRT_ID_MASK (((1 << KEY_CTRL_PRT_ID_BITS) - 1) << KEY_CTRL_PRT_ID)
+
+#define KEY_CTRL_HEK_SIZE 24
+#define KEY_CTRL_HEK_SIZE_BITS 6
+#define KEY_CTRL_HEK_SIZE_MAX 36
+#define KEY_CTRL_HEK_SIZE_MASK (((1 << KEY_CTRL_HEK_SIZE_BITS) - 1) << KEY_CTRL_HEK_SIZE)
+
+struct mv_pp2x_cls_c3_hash_pair {
+ u16 pair_num;
+ u16 old_idx[MVPP2_CLS_C3_MAX_SEARCH_DEPTH];
+ u16 new_idx[MVPP2_CLS_C3_MAX_SEARCH_DEPTH];
+};
+
+/* Classifier engine C3 entry: hash key (HEK + key control) and the
+ * SRAM action words, with the SRAM also addressable by register layout
+ * (offsets noted per field).
+ */
+struct mv_pp2x_cls_c3_entry {
+ u32 index;
+ u32 ext_index; /* extension-table index, used when the HEK exceeds the base key size */
+
+ struct {
+ union {
+ u32 words[MVPP2_CLS_C3_EXT_HEK_WORDS];
+ u8 bytes[MVPP2_CLS_C3_EXT_HEK_WORDS * 4];
+ } hek;
+ u32 key_ctrl; /* 0x1C10 */
+ } key;
+ union {
+ u32 words[MVPP2_CLS_C3_SRAM_WORDS];
+ struct {
+ u32 actions; /* 0x1D40 */
+ u32 qos_attr; /* 0x1D44 */
+ u32 hwf_attr; /* 0x1D48 */
+ u32 dup_attr; /* 0x1D4C */
+ u32 seq_l_attr; /* 0x1D50 */
+ u32 seq_h_attr; /* 0x1D54 */
+ } regs;
+ } sram;
+};
+
+struct mv_pp2x_cls_c3_shadow_hash_entry {
+ /* valid if size > 0 */
+ /* size include the extension*/
+ int ext_ptr;
+ int size;
+};
+
+/* Classifier C4 Top Registers */
+#define MVPP2_CLS4_PHY_TO_RL_REG(port) (0x1E00 + ((port) * 4))
+#define MVPP2_CLS4_PHY_TO_RL_GRP 0
+#define MVPP2_CLS4_PHY_TO_RL_GRP_BITS 3
+#define MVPP2_CLS4_PHY_TO_RL_GRP_MASK (((1 << MVPP2_CLS4_PHY_TO_RL_GRP_BITS) - 1) << \
+ MVPP2_CLS4_PHY_TO_RL_GRP)
+#define MVPP2_CLS4_PHY_TO_RL_RULE_NUM 4
+#define MVPP2_CLS4_PHY_TO_RL_RULE_NUM_BITS 4
+#define MVPP2_CLS4_PHY_TO_RL_RULE_NUM_MASK (((1 << MVPP2_CLS4_PHY_TO_RL_RULE_NUM_BITS) - 1) << \
+ MVPP2_CLS4_PHY_TO_RL_RULE_NUM)
+
+#define MVPP2_CLS4_UNI_TO_RL_REG(uni) (0x1E20 + ((uni) * 4))
+#define MVPP2_CLS4_UNI_TO_RL_GRP 0
+#define MVPP2_CLS4_UNI_TO_RL_RULE_NUM 4
+
+#define MVPP2_CLS4_RL_INDEX_REG (0x1E40)
+#define MVPP2_CLS4_RL_INDEX_RULE 0
+#define MVPP2_CLS4_RL_INDEX_GRP 3
+
+#define MVPP2_CLS4_FATTR1_REG (0x1E50)
+#define MVPP2_CLS4_FATTR2_REG (0x1E54)
+#define MVPP2_CLS4_FATTR_REG_NUM 2
+
+#define MVPP2_CLS4_FATTR_ID(field) (((field) * 9) % 27)
+#define MVPP2_CLS4_FATTR_ID_BITS 6
+#define MVPP2_CLS4_FATTR_ID_MAX ((1 << MVPP2_CLS4_FATTR_ID_BITS) - 1)
+#define MVPP2_CLS4_FATTR_ID_MASK(field) (MVPP2_CLS4_FATTR_ID_MAX << MVPP2_CLS4_FATTR_ID(field))
+#define MVPP2_CLS4_FATTR_ID_VAL(field, reg_val) ((reg_val & MVPP2_CLS4_FATTR_ID_MASK(field)) >> \
+ MVPP2_CLS4_FATTR_ID(field))
+
+#define MVPP2_CLS4_FATTR_OPCODE_BITS 3
+#define MVPP2_CLS4_FATTR_OPCODE(field) ((((field) * 9) % 27) + MVPP2_CLS4_FATTR_ID_BITS)
+#define MVPP2_CLS4_FATTR_OPCODE_MAX ((1 << MVPP2_CLS4_FATTR_OPCODE_BITS) - 1)
+#define MVPP2_CLS4_FATTR_OPCODE_MASK(field) (MVPP2_CLS4_FATTR_OPCODE_MAX << MVPP2_CLS4_FATTR_OPCODE(field))
+#define MVPP2_CLS4_FATTR_OPCODE_VAL(field, reg_val) ((reg_val & MVPP2_CLS4_FATTR_OPCODE_MASK(field)) >> \
+ MVPP2_CLS4_FATTR_OPCODE(field))
+
+#define MVPP2_CLS4_FDATA1_REG (0x1E58)
+#define MVPP2_CLS4_FDATA2_REG (0x1E5C)
+#define MVPP2_CLS4_FDATA3_REG (0x1E60)
+#define MVPP2_CLS4_FDATA4_REG (0x1E64)
+#define MVPP2_CLS4_FDATA5_REG (0x1E68)
+#define MVPP2_CLS4_FDATA6_REG (0x1E6C)
+#define MVPP2_CLS4_FDATA7_REG (0x1E70)
+#define MVPP2_CLS4_FDATA8_REG (0x1E74)
+#define MVPP2_CLS4_FDATA_REG(reg_num) (0x1E58 + (4 * (reg_num)))
+#define MVPP2_CLS4_FDATA_REGS_NUM 8
+
+#define MVPP2_CLS4_FDATA7_L3INFO 16
+#define MVPP2_CLS4_FDATA7_L3INFO_BITS 4
+#define MVPP2_CLS4_L3INFO_MAX ((1 << MVPP2_CLS4_FDATA7_L3INFO_BITS) - 1)
+#define MVPP2_CLS4_L3INFO_MASK (MVPP2_CLS4_L3INFO_MAX << MVPP2_CLS4_FDATA7_L3INFO)
+#define MVPP2_CLS4_L3INFO_VAL(reg_val) (((reg_val) & MVPP2_CLS4_L3INFO_MASK) >> \
+ MVPP2_CLS4_FDATA7_L3INFO)
+
+#define MVPP2_CLS4_FDATA7_L4INFO 20
+#define MVPP2_CLS4_FDATA7_L4INFO_BITS 4
+#define MVPP2_CLS4_L4INFO_MAX ((1 << MVPP2_CLS4_FDATA7_L4INFO_BITS) - 1)
+#define MVPP2_CLS4_L4INFO_MASK (MVPP2_CLS4_L4INFO_MAX << MVPP2_CLS4_FDATA7_L4INFO)
+#define MVPP2_CLS4_L4INFO_VAL(reg_val) (((reg_val) & MVPP2_CLS4_L4INFO_MASK) >> \
+ MVPP2_CLS4_FDATA7_L4INFO)
+
+#define MVPP2_CLS4_FDATA7_MACME 24
+#define MVPP2_CLS4_FDATA7_MACME_BITS 2
+#define MVPP2_CLS4_MACME_MAX ((1 << MVPP2_CLS4_FDATA7_MACME_BITS) - 1)
+#define MVPP2_CLS4_MACME_MASK (MVPP2_CLS4_MACME_MAX << MVPP2_CLS4_FDATA7_MACME)
+#define MVPP2_CLS4_MACME_VAL(reg_val) (((reg_val) & MVPP2_CLS4_MACME_MASK) >> MVPP2_CLS4_FDATA7_MACME)
+
+#define MVPP2_CLS4_FDATA7_PPPOE 26
+#define MVPP2_CLS4_FDATA7_PPPOE_BITS 2
+#define MVPP2_CLS4_PPPOE_MAX ((1 << MVPP2_CLS4_FDATA7_PPPOE_BITS) - 1)
+#define MVPP2_CLS4_PPPOE_MASK (MVPP2_CLS4_PPPOE_MAX << MVPP2_CLS4_FDATA7_PPPOE)
+#define MVPP2_CLS4_PPPOE_VAL(reg_val) (((reg_val) & MVPP2_CLS4_PPPOE_MASK) >> MVPP2_CLS4_FDATA7_PPPOE)
+
+#define MVPP2_CLS4_FDATA7_VLAN 28
+#define MVPP2_CLS4_FDATA7_VLAN_BITS 3
+#define MVPP2_CLS4_VLAN_MAX ((1 << MVPP2_CLS4_FDATA7_VLAN_BITS) - 1)
+#define MVPP2_CLS4_VLAN_MASK (MVPP2_CLS4_VLAN_MAX << MVPP2_CLS4_FDATA7_VLAN)
+#define MVPP2_CLS4_VLAN_VAL(reg_val) (((reg_val) & MVPP2_CLS4_VLAN_MASK) >> MVPP2_CLS4_FDATA7_VLAN)
+
+#define MVPP2_CLS4_ACT_REG (0x1E80)
+#define MVPP2_CLS4_ACT_QOS_ATTR_REG (0x1E84)
+#define MVPP2_CLS4_ACT_DUP_ATTR_REG (0x1E88)
+#define MVPP2_CNT_IDX_RULE(rule, set) ((rule) << 3 | (set))
+#define MVPP2_CLS_C4_TBL_HIT_REG (0x7708)
+
+/* Classifier C4 constants */
+#define MVPP2_CLS_C4_GRP_SIZE (8)
+#define MVPP2_CLS_C4_GRPS_NUM (8)
+#define MVPP2_CLS_C4_TBL_WORDS (10)
+#define MVPP2_CLS_C4_TBL_DATA_WORDS (8)
+#define MVPP2_CLS_C4_SRAM_WORDS (3)
+#define MVPP2_CLS_C4_FIELDS_NUM (6)
+
+/* C4 entry structure */
+/* Classifier engine C4 entry: rule (field attributes + field data) and
+ * SRAM action words, with the SRAM also addressable by register layout
+ * (offsets noted per field).
+ */
+struct mv_pp2x_cls_c4_entry {
+ u32 rule_index;
+ u32 set_index;
+ union {
+ u32 words[MVPP2_CLS_C4_TBL_WORDS];
+ struct {
+ u32 attr[MVPP2_CLS4_FATTR_REG_NUM];
+ u32 fdata_arr[MVPP2_CLS_C4_TBL_DATA_WORDS];
+ } regs;
+ } rules;
+ union {
+ u32 words[MVPP2_CLS_C4_SRAM_WORDS];
+ struct {
+ u32 actions;/* 0x1E80 */
+ u32 qos_attr;/* 0x1E84 */
+ u32 dup_attr;/* 0x1E88 */
+ } regs;
+ } sram;
+};
+
+/************** TX Packet Modification Registers *******************/
+#define MVPP2_PME_TBL_IDX_REG (0x8400)
+#define MVPP2_PME_TBL_INSTR_REG (0x8480)
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_TBL_DATA1_REG (0x8500)
+#define MVPP2_PME_TBL_DATA2_REG (0x8580)
+#define MVPP2_PME_TBL_DATA_BITS 16
+#define MVPP2_PME_TBL_DATA_OFFS(idx) ((idx == 0) ? MVPP2_PME_TBL_DATA_BITS : 0)
+#define MVPP2_PME_TBL_DATA_MASK(idx) (((1 << MVPP2_PME_TBL_DATA_BITS) - 1) << MVPP2_PME_TBL_DATA_OFFS(idx))
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_TBL_STATUS_REG (0x8600)
+#define MVPP2_PME_TCONT_THRESH_REG (0x8604)
+#define MVPP2_PME_MTU_REG (0x8608)
+
+#define MVPP2_PME_MAX_VLAN_ETH_TYPES 4
+#define MVPP2_PME_VLAN_ETH_TYPE_REG(i) (0x8610 + ((i) << 2))
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_DEF_VLAN_CFG_REG (0x8620)
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_MAX_DSA_ETH_TYPES 2
+#define MVPP2_PME_DEF_DSA_CFG_REG(i) (0x8624 + ((i) << 2))
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_DEF_DSA_SRC_DEV_REG (0x8630)
+#define MVPP2_PME_DSA_SRC_DEV_OFFS 1
+#define MVPP2_PME_DSA_SRC_DEV_BITS 4
+#define MVPP2_PME_DSA_SRC_DEV_ALL_MASK (((1 << MVPP2_PME_DSA_SRC_DEV_BITS) - 1) << MVPP2_PME_DSA_SRC_DEV_OFFS)
+#define MVPP2_PME_DSA_SRC_DEV_MASK(dev) ((dev) << MVPP2_PME_DSA_SRC_DEV_OFFS)
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_TTL_ZERO_FRWD_REG (0x8640)
+#define MVPP2_PME_TTL_ZERO_FRWD_BIT 0
+#define MVPP2_PME_TTL_ZERO_FRWD_MASK BIT(MVPP2_PME_TTL_ZERO_FRWD_BIT)
+/*--------------------------------------------------------------------------*/
+#define MVPP2_PME_PPPOE_ETYPE_REG (0x8650)
+#define MVPP2_PME_PPPOE_DATA_REG (0x8654)
+
+#define MVPP2_PME_PPPOE_CODE_OFFS 0
+#define MVPP2_PME_PPPOE_CODE_BITS 8
+#define MVPP2_PME_PPPOE_CODE_ALL_MASK (((1 << MVPP2_PME_PPPOE_CODE_BITS) - 1) << MVPP2_PME_PPPOE_CODE_OFFS)
+#define MVPP2_PME_PPPOE_CODE_MASK(code) (((code) << MVPP2_PME_PPPOE_CODE_OFFS) & MVPP2_PME_PPPOE_CODE_ALL_MASK)
+
+#define MVPP2_PME_PPPOE_TYPE_OFFS 8
+#define MVPP2_PME_PPPOE_TYPE_BITS 4
+#define MVPP2_PME_PPPOE_TYPE_ALL_MASK (((1 << MVPP2_PME_PPPOE_TYPE_BITS) - 1) << MVPP2_PME_PPPOE_TYPE_OFFS)
+#define MVPP2_PME_PPPOE_TYPE_MASK(type) (((type) << MVPP2_PME_PPPOE_TYPE_OFFS) & MVPP2_PME_PPPOE_TYPE_ALL_MASK)
+
+#define MVPP2_PME_PPPOE_VER_OFFS 12
+#define MVPP2_PME_PPPOE_VER_BITS 4
+#define MVPP2_PME_PPPOE_VER_ALL_MASK (((1 << MVPP2_PME_PPPOE_VER_BITS) - 1) << MVPP2_PME_PPPOE_VER_OFFS)
+#define MVPP2_PME_PPPOE_VER_MASK(ver) (((ver) << MVPP2_PME_PPPOE_VER_OFFS) & MVPP2_PME_PPPOE_VER_ALL_MASK)
+
+#define MVPP2_PME_PPPOE_LEN_REG (0x8658)
+#define MVPP2_PME_PPPOE_PROTO_REG (0x865c)
+
+#define MVPP2_PME_PPPOE_PROTO_OFFS(i) ((i == 0) ? 0 : 16)
+#define MVPP2_PME_PPPOE_PROTO_BITS (16)
+#define MVPP2_PME_PPPOE_PROTO_ALL_MASK(i) (((1 << MVPP2_PME_PPPOE_PROTO_BITS) - 1) << \
+ MVPP2_PME_PPPOE_PROTO_OFFS(i))
+#define MVPP2_PME_PPPOE_PROTO_MASK(i, p) (((p) << MVPP2_PME_PPPOE_PROTO_OFFS(i)) & \
+ MVPP2_PME_PPPOE_PROTO_ALL_MASK(i))
+
+#define MVPP2_PME_CONFIG_REG (0x8660)
+
+#define MVPP2_PME_MAX_HDR_SIZE_OFFS 0
+#define MVPP2_PME_MAX_HDR_SIZE_BITS 8
+#define MVPP2_PME_MAX_HDR_SIZE_ALL_MASK (((1 << MVPP2_PME_MAX_HDR_SIZE_BITS) - 1) << \
+ MVPP2_PME_MAX_HDR_SIZE_OFFS)
+#define MVPP2_PME_MAX_HDR_SIZE_MASK(size) (((size) << MVPP2_PME_MAX_HDR_SIZE_OFFS) & \
+ MVPP2_PME_MAX_HDR_SIZE_ALL_MASK)
+
+#define MVPP2_PME_MAX_INSTR_NUM_OFFS 16
+#define MVPP2_PME_MAX_INSTR_NUM_BITS 8
+#define MVPP2_PME_MAX_INSTR_NUM_ALL_MASK (((1 << MVPP2_PME_MAX_INSTR_NUM_BITS) - 1) << \
+ MVPP2_PME_MAX_INSTR_NUM_OFFS)
+#define MVPP2_PME_MAX_INSTR_NUM_MASK(num) (((num) << MVPP2_PME_MAX_INSTR_NUM_OFFS) & \
+ MVPP2_PME_MAX_INSTR_NUM_ALL_MASK)
+
+#define MVPP2_PME_DROP_ON_ERR_BIT 24
+#define MVPP2_PME_DROP_ON_ERR_MASK BIT(MVPP2_PME_DROP_ON_ERR_BIT)
+/*--------------------------------------------------------------------------*/
+
+#define MVPP2_PME_STATUS_1_REG (0x8664)
+#define MVPP2_PME_STATUS_2_REG(txp) (0x8700 + 4 * (txp))
+#define MVPP2_PME_STATUS_3_REG(txp) (0x8780 + 4 * (txp))
+
+/* PME instruction table (MVPP2_PME_TBL_INSTR_REG) field definitions */
+#define MVPP2_PME_DATA_OFFS 0
+#define MVPP2_PME_DATA_BITS 16
+#define MVPP2_PME_DATA_MASK (((1 << MVPP2_PME_DATA_BITS) - 1) << MVPP2_PME_DATA_OFFS)
+
+#define MVPP2_PME_CTRL_OFFS 16
+#define MVPP2_PME_CTRL_BITS 16
+#define MVPP2_PME_CTRL_MASK (((1 << MVPP2_PME_CTRL_BITS) - 1) << MVPP2_PME_CTRL_OFFS)
+
+#define MVPP2_PME_CMD_OFFS 16
+#define MVPP2_PME_CMD_BITS 5
+#define MVPP2_PME_CMD_ALL_MASK (((1 << MVPP2_PME_CMD_BITS) - 1) << MVPP2_PME_CMD_OFFS)
+#define MVPP2_PME_CMD_MASK(cmd) ((cmd) << MVPP2_PME_CMD_OFFS)
+
+#define MVPP2_PME_IP4_CSUM_BIT 21
+#define MVPP2_PME_IP4_CSUM_MASK BIT(MVPP2_PME_IP4_CSUM_BIT)
+
+#define MVPP2_PME_L4_CSUM_BIT 22
+#define MVPP2_PME_L4_CSUM_MASK BIT(MVPP2_PME_L4_CSUM_BIT)
+
+#define MVPP2_PME_LAST_BIT 23
+#define MVPP2_PME_LAST_MASK BIT(MVPP2_PME_LAST_BIT)
+
+#define MVPP2_PME_CMD_TYPE_OFFS 24
+#define MVPP2_PME_CMD_TYPE_BITS 3
+#define MVPP2_PME_CMD_TYPE_ALL_MASK (((1 << MVPP2_PME_CMD_TYPE_BITS) - 1) << MVPP2_PME_CMD_TYPE_OFFS)
+#define MVPP2_PME_CMD_TYPE_MASK(type) ((type) << MVPP2_PME_CMD_TYPE_OFFS)
+
+#define MVPP2_TOTAL_TXP_NUM (16 + 3 - 1)
+
+/* PME data1/data2 fields (MVPP2_PME_TBL_DATA1_REG, MVPP2_PME_TBL_DATA2_REG). NOTE(review): the three macros below duplicate identical definitions from the "TX Packet Modification Registers" section above — harmless identical redefinition, but should be deduplicated. */
+#define MVPP2_PME_TBL_DATA_BITS 16
+#define MVPP2_PME_TBL_DATA_OFFS(idx) ((idx == 0) ? MVPP2_PME_TBL_DATA_BITS : 0)
+#define MVPP2_PME_TBL_DATA_MASK(idx) (((1 << MVPP2_PME_TBL_DATA_BITS) - 1) << MVPP2_PME_TBL_DATA_OFFS(idx))
+
+/* TX packet modification constants */
+#define MVPP2_PME_INSTR_SIZE 2600
+#define MVPP2_PME_DATA1_SIZE (46 * 1024 / 2) /* 46KBytes = 23K data of 2 bytes */
+#define MVPP2_PME_DATA2_SIZE (4 * 1024 / 2) /* 4KBytes = 2K data of 2 bytes */
+
+/* PME instruction opcodes, written into the instruction word's CMD
+ * field (MVPP2_PME_CMD_BITS == 5, so the opcode space is 0..0x1f;
+ * MVPP2_PME_CMD_DROP_PKT is pinned to the 5-bit maximum 0x1f).
+ */
+enum mv_pp2x_pme_instr {
+ MVPP2_PME_CMD_NONE = 0,
+ MVPP2_PME_CMD_ADD_2B,
+ MVPP2_PME_CMD_CFG_VLAN,
+ MVPP2_PME_CMD_ADD_VLAN,
+ MVPP2_PME_CMD_CFG_DSA_1,
+ MVPP2_PME_CMD_CFG_DSA_2,
+ MVPP2_PME_CMD_ADD_DSA,
+ MVPP2_PME_CMD_DEL_BYTES,
+ MVPP2_PME_CMD_REPLACE_2B,
+ MVPP2_PME_CMD_REPLACE_LSB,
+ MVPP2_PME_CMD_REPLACE_MSB,
+ MVPP2_PME_CMD_REPLACE_VLAN,
+ MVPP2_PME_CMD_DEC_LSB,
+ MVPP2_PME_CMD_DEC_MSB,
+ MVPP2_PME_CMD_ADD_CALC_LEN,
+ MVPP2_PME_CMD_REPLACE_LEN,
+ MVPP2_PME_CMD_IPV4_CSUM,
+ MVPP2_PME_CMD_L4_CSUM,
+ MVPP2_PME_CMD_SKIP,
+ MVPP2_PME_CMD_JUMP,
+ MVPP2_PME_CMD_JUMP_SKIP,
+ MVPP2_PME_CMD_JUMP_SUB,
+ MVPP2_PME_CMD_PPPOE,
+ MVPP2_PME_CMD_STORE,
+ MVPP2_PME_CMD_ADD_IP4_CSUM,
+ MVPP2_PME_CMD_PPPOE_2,
+ MVPP2_PME_CMD_REPLACE_MID,
+ MVPP2_PME_CMD_ADD_MULT,
+ MVPP2_PME_CMD_REPLACE_MULT,
+ MVPP2_PME_CMD_REPLACE_REM_2B,
+ MVPP2_PME_CMD_ADD_IP6_HDR,
+ MVPP2_PME_CMD_DROP_PKT = 0x1f,
+ MVPP2_TMP_CMD_LAST
+};
+
+/* PME entry structure */
+struct mv_pp2x_pme_entry {
+ int index;
+ u32 word;
+};
+
+/* MC */
+/*-------------------------------------------------------------------------------*/
+#define MVPP2_MC_INDEX_REG (0x160)
+#define MVPP2_MC_INDEX_MAX ((1 << MVPP2_CLS2_ACT_DUP_ATTR_DUPID_BITS) - 1)
+/*------------------------------------------------------------------------------*/
+#define MVPP2_MC_DATA1_REG (0x164)
+#define MVPP2_MC_DATA1_DPTR 1
+#define MVPP2_MC_DATA1_IPTR 16
+/*------------------------------------------------------------------------------*/
+#define MVPP2_MC_DATA2_REG (0x168)
+#define MVPP2_MC_DATA2_GEM_ID 0
+#define MVPP2_MC_DATA2_PRI 12
+#define MVPP2_MC_DATA2_DSCP 15
+#define MVPP2_MC_DATA2_GEM_ID_EN BIT(21)
+#define MVPP2_MC_DATA2_PRI_EN BIT(22)
+#define MVPP2_MC_DATA2_DSCP_EN BIT(23)
+/*------------------------------------------------------------------------------*/
+#define MVPP2_MC_DATA3_REG (0x16C)
+#define MVPP2_MC_DATA3_QUEUE 0
+#define MVPP2_MC_DATA3_HWF_EN BIT(8)
+#define MVPP2_MC_DATA3_NEXT 16
+#define MVPP2_MC_DATA3_NEXT_MASK (MVPP2_MC_INDEX_MAX << MVPP2_MC_DATA3_NEXT)
+
+#define MVPP2_MC_TBL_SIZE 256
+#define MVPP2_MC_WORDS 3
+
+/* MC entry structure */
+/* MC (multicast duplication) entry: table index plus the three data
+ * words, also addressable by register layout (offsets noted per field).
+ */
+struct mv_pp2x_mc_entry {
+ u32 index;
+ union {
+ u32 words[MVPP2_MC_WORDS];
+ struct {
+ u32 data1;/* 0x164 */
+ u32 data2;/* 0x168 */
+ u32 data3;/* 0x16c */
+ } regs;
+ } sram;
+};
+
+#endif /*_MVPP2_HW_TYPE_H_*/
+/* NOTE(review): everything below falls outside the _MVPP2_HW_TYPE_H_ include guard closed above; the #endif should be moved to end-of-file so double inclusion does not redefine the policer macros. */
+/* Policer */
+#define MVPP2_PLCR_NUM 48
+
+/*********************************** RX Policer Registers *******************/
+/* exist only in ppv2.0 */
+#define MVPP2_PLCR_ENABLE_REG (0x1300)
+
+#define MVPP2_PLCR_EN_OFFS 0
+#define MVPP2_PLCR_EN_ALL_MASK (((1 << MVPP2_PLCR_NUM) - 1) << MVPP2_PLCR_EN_OFFS)
+#define MVPP2_PLCR_EN_MASK(plcr) ((1 << (plcr)) << MVPP2_PLCR_EN_OFFS)
+/*--------------------------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_BASE_PERIOD_REG (0x1304)
+
+#define MVPP2_PLCR_BASE_PERIOD_OFFS 0
+#define MVPP2_PLCR_BASE_PERIOD_BITS 16
+#define MVPP2_PLCR_BASE_PERIOD_ALL_MASK \
+ (((1 << MVPP2_PLCR_BASE_PERIOD_BITS) - 1) << MVPP2_PLCR_BASE_PERIOD_OFFS)
+#define MVPP2_PLCR_BASE_PERIOD_MASK(p) \
+ (((p) << MVPP2_PLCR_BASE_PERIOD_OFFS) & MVPP2_PLCR_BASE_PERIOD_ALL_MASK)
+
+#define MVPP2_PLCR_ADD_TOKENS_EN_BIT 16
+#define MVPP2_PLCR_ADD_TOKENS_EN_MASK BIT(MVPP2_PLCR_ADD_TOKENS_EN_BIT)
+/*--------------------------------------------------------------------------------------------*/
+#define MVPP2_PLCR_MODE_REG (0x1308)
+#define MVPP2_PLCR_MODE_BITS (3)
+#define MVPP2_PLCR_MODE_MASK (((1 << MVPP2_PLCR_MODE_BITS) - 1) << 0)
+
+/*---------------------------------------------------------------------------------------------*/
+/* exist only in ppv2.1*/
+#define MVPP2_PLCR_TABLE_INDEX_REG (0x130c)
+#define MVPP2_PLCR_COMMIT_TOKENS_REG (0x1310)
+#define MVPP2_PLCR_EXCESS_TOKENS_REG (0x1314)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_BUCKET_SIZE_REG (0x1318)
+
+#define MVPP2_PLCR_COMMIT_SIZE_OFFS 0
+#define MVPP2_PLCR_COMMIT_SIZE_BITS 16
+#define MVPP2_PLCR_COMMIT_SIZE_ALL_MASK \
+ (((1 << MVPP2_PLCR_COMMIT_SIZE_BITS) - 1) << MVPP2_PLCR_COMMIT_SIZE_OFFS)
+#define MVPP2_PLCR_COMMIT_SIZE_MASK(size) \
+ (((size) << MVPP2_PLCR_COMMIT_SIZE_OFFS) & MVPP2_PLCR_COMMIT_SIZE_ALL_MASK)
+
+#define MVPP2_PLCR_EXCESS_SIZE_OFFS 16
+#define MVPP2_PLCR_EXCESS_SIZE_BITS 16
+#define MVPP2_PLCR_EXCESS_SIZE_ALL_MASK \
+ (((1 << MVPP2_PLCR_EXCESS_SIZE_BITS) - 1) << MVPP2_PLCR_EXCESS_SIZE_OFFS)
+#define MVPP2_PLCR_EXCESS_SIZE_MASK(size) \
+ (((size) << MVPP2_PLCR_EXCESS_SIZE_OFFS) & MVPP2_PLCR_EXCESS_SIZE_ALL_MASK)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_TOKEN_CFG_REG (0x131c)
+
+#define MVPP2_PLCR_TOKEN_VALUE_OFFS 0
+#define MVPP2_PLCR_TOKEN_VALUE_BITS 10
+#define MVPP2_PLCR_TOKEN_VALUE_ALL_MASK \
+ (((1 << MVPP2_PLCR_TOKEN_VALUE_BITS) - 1) << MVPP2_PLCR_TOKEN_VALUE_OFFS)
+#define MVPP2_PLCR_TOKEN_VALUE_MASK(val) \
+ (((val) << MVPP2_PLCR_TOKEN_VALUE_OFFS) & MVPP2_PLCR_TOKEN_VALUE_ALL_MASK)
+
+#define MVPP2_PLCR_TOKEN_TYPE_OFFS 12
+#define MVPP2_PLCR_TOKEN_TYPE_BITS 3
+#define MVPP2_PLCR_TOKEN_TYPE_ALL_MASK \
+ (((1 << MVPP2_PLCR_TOKEN_TYPE_BITS) - 1) << MVPP2_PLCR_TOKEN_TYPE_OFFS)
+#define MVPP2_PLCR_TOKEN_TYPE_MASK(type) \
+ (((type) << MVPP2_PLCR_TOKEN_TYPE_OFFS) & MVPP2_PLCR_TOKEN_TYPE_ALL_MASK)
+
+#define MVPP2_PLCR_TOKEN_UNIT_BIT 31
+#define MVPP2_PLCR_TOKEN_UNIT_MASK BIT(MVPP2_PLCR_TOKEN_UNIT_BIT)
+#define MVPP2_PLCR_TOKEN_UNIT_BYTES (0 << MVPP2_PLCR_TOKEN_UNIT_BIT)
+#define MVPP2_PLCR_TOKEN_UNIT_PKTS BIT(MVPP2_PLCR_TOKEN_UNIT_BIT)
+
+#define MVPP2_PLCR_COLOR_MODE_BIT 30
+#define MVPP2_PLCR_COLOR_MODE_MASK BIT(MVPP2_PLCR_COLOR_MODE_BIT)
+#define MVPP2_PLCR_COLOR_MODE_BLIND (0 << MVPP2_PLCR_COLOR_MODE_BIT)
+#define MVPP2_PLCR_COLOR_MODE_AWARE BIT(MVPP2_PLCR_COLOR_MODE_BIT)
+
+#define MVPP2_PLCR_ENABLE_BIT 29
+#define MVPP2_PLCR_ENABLE_MASK BIT(MVPP2_PLCR_ENABLE_BIT)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_MIN_PKT_LEN_REG (0x1320)
+
+#define MVPP2_PLCR_MIN_PKT_LEN_OFFS 0
+#define MVPP2_PLCR_MIN_PKT_LEN_BITS 8
+#define MVPP2_PLCR_MIN_PKT_LEN_ALL_MASK \
+ (((1 << MVPP2_PLCR_MIN_PKT_LEN_BITS) - 1) << MVPP2_PLCR_MIN_PKT_LEN_OFFS)
+#define MVPP2_PLCR_MIN_PKT_LEN_MASK(len) \
+ (((len) << MVPP2_PLCR_MIN_PKT_LEN_OFFS) & MVPP2_PLCR_MIN_PKT_LEN_ALL_MASK)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_EDROP_EN_REG (0x1330)
+
+#define MVPP2_PLCR_EDROP_EN_BIT 0
+#define MVPP2_PLCR_EDROP_EN_MASK BIT(MVPP2_PLCR_EDROP_EN_BIT)
+/*---------------------------------------------------------------------------------------------*/
+/*ppv2.1 policer early drop threshold mechanism changed*/
+#define MVPP2_V0_PLCR_EDROP_THRESH_NUM 4
+
+#define MVPP2_V0_PLCR_EDROP_TR_OFFS(i) ((i % 2) ? 16 : 0)
+#define MVPP2_V0_PLCR_EDROP_TR_BITS 14
+#define MVPP2_V0_PLCR_EDROP_TR_ALL_MASK(i) \
+ (((1 << MVPP2_V0_PLCR_EDROP_TR_BITS) - 1) << MVPP2_V0_PLCR_EDROP_TR_OFFS(i))
+#define MVPP2_V0_PLCR_EDROP_TR_MASK(i, tr) \
+ (((tr) << MVPP2_V0_PLCR_EDROP_TR_OFFS(i)) & MVPP2_V0_PLCR_EDROP_TR_ALL_MASK(i))
+
+#define MVPP2_V0_PLCR_EDROP_CPU_TR_REG(i) (0x1340 + (((i) / 2) << 2))
+#define MVPP2_V0_PLCR_EDROP_HWF_TR_REG(i) (0x1350 + (((i) / 2) << 2))
+/*---------------------------------------------------------------------------------------------*/
+/*ppv2.1 policer early drop threshold new mechanism*/
+#define MVPP2_V1_PLCR_EDROP_THRESH_NUM 16
+
+#define MVPP2_V1_PLCR_EDROP_TR_OFFS 0
+#define MVPP2_V1_PLCR_EDROP_TR_BITS 14
+
+#define MVPP2_V1_PLCR_EDROP_TR_MASK(i) \
+ (((1 << MVPP2_V1_PLCR_EDROP_TR_BITS) - 1) << MVPP2_V1_PLCR_EDROP_TR_OFFS)
+
+#define MVPP2_V1_PLCR_EDROP_CPU_TR_REG(i) (0x1380 + ((i) * 4))
+#define MVPP2_V1_PLCR_EDROP_HWF_TR_REG(i) (0x13c0 + ((i) * 4))
+
+/*---------------------------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_EDROP_RXQ_REG (0x1348)
+#define MVPP2_PLCR_EDROP_RXQ_TR_REG (0x134c)
+/*--------------------------------------------------------------------------*/
+
+#define MVPP2_PLCR_EDROP_TXQ_REG (0x1358)
+#define MVPP2_PLCR_EDROP_TXQ_TR_REG (0x135c)
+/*--------------------------------------------------------------------------*/
+#define MVPP2_V1_PLCR_PKT_GREEN_REG(pol) (0x7400 + 4 * (pol))
+#define MVPP2_V1_PLCR_PKT_YELLOW_REG(pol) (0x7500 + 4 * (pol))
+#define MVPP2_V1_PLCR_PKT_RED_REG(pol) (0x7600 + 4 * (pol))
+/*---------------------------------------------------------------------------------------------*/
diff --git a/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_main.c b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_main.c
new file mode 100644
index 000000000000..c343f17d5e12
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2x/mv_pp2x_main.c
@@ -0,0 +1,5428 @@
+ /*
+ * ***************************************************************************
+ * Copyright (C) 2016 Marvell International Ltd.
+ * ***************************************************************************
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, either version 2 of the License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * ***************************************************************************
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/cpumask.h>
+#include <linux/kallsyms.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/busy_poll.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <dt-bindings/phy/phy-mvebu-comphy.h>
+
+#include "mv_pp2x.h"
+#include "mv_pp2x_hw.h"
+#include "mv_gop110_hw.h"
+
+#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
+#include <if_mv_pp2x_netmap.h>
+#endif
+
+#ifdef CONFIG_MV_PTP_SERVICE
+/* inline PTP procedures */
+#include <mv_pp2x_ptp_hook.c>
+/* non-inline init/config */
+#include <mv_ptp_if.h>
+#include <mv_ptp_service.h>
+#include <mv_pp2x_ptp_init.h>
+#endif
+
+/* NOTE(review): debug/test constants — a self-test packet size and a
+ * hard-coded CP110 base address/window size. Presumably used only by
+ * debug paths in this file; confirm before relying on them.
+ */
+#define MVPP2_SKB_TEST_SIZE 64
+#define MVPP2_ADDRESS 0xf2000000
+#define CPN110_ADDRESS_SPACE_SIZE (16 * 1024 * 1024)
+
+/* Declarations */
+/* Driver-wide tunables; most are exposed as read-only (S_IRUGO) module
+ * parameters below.
+ */
+#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
+u8 mv_pp2x_num_cos_queues = 1;
+#else
+u8 mv_pp2x_num_cos_queues = 4;
+#endif
+static u8 mv_pp2x_queue_mode = MVPP2_QDIST_SINGLE_MODE;
+static u8 rss_mode;
+static u8 default_cpu;
+static u8 cos_classifer;
+static u32 pri_map = 0x3210; /* As default, cos0--rxq0, cos1--rxq1,
+ * cos2--rxq2, cos3--rxq3
+ */
+static u8 default_cos = 3; /* As default, non-IP packet has highest CoS value */
+static u16 rx_queue_size = MVPP2_MAX_RXD;
+static u16 tx_queue_size = MVPP2_MAX_TXD;
+static u16 buffer_scaling = 100;
+static u32 port_cpu_bind_map;
+static u8 first_bm_pool;
+static u8 first_addr_space;
+static u8 first_log_rxq_queue;
+static u8 uc_filter_max = 4;
+static u16 stats_delay_msec = STATS_DELAY;
+static u16 stats_delay;
+
+u32 debug_param;
+
+/* Per-logical-pool attributes; buf_num may be overridden through the
+ * short_pool/long_pool/jumbo_pool module parameters below.
+ */
+struct mv_pp2x_pool_attributes mv_pp2x_pools[] = {
+ {
+ .description = "short", /* pkt_size=MVPP2_BM_SHORT_PKT_SIZE */
+ .buf_num = MVPP2_BM_SHORT_BUF_NUM,
+ },
+ {
+ .description = "long", /* pkt_size=MVPP2_BM_LONG_PKT_SIZE */
+ .buf_num = MVPP2_BM_LONG_BUF_NUM,
+ },
+ {
+ .description = "jumbo", /* pkt_size=MVPP2_BM_JUMBO_PKT_SIZE */
+ .buf_num = MVPP2_BM_JUMBO_BUF_NUM,
+ }
+};
+
+module_param_named(num_cos_queues, mv_pp2x_num_cos_queues, byte, S_IRUGO);
+MODULE_PARM_DESC(num_cos_queues, "Set number of cos_queues (1-8), def=4");
+
+module_param_named(queue_mode, mv_pp2x_queue_mode, byte, S_IRUGO);
+MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
+
+module_param(rss_mode, byte, S_IRUGO);
+MODULE_PARM_DESC(rss_mode, "Set rss_mode (UDP_2T=0, UDP_5T=1)");
+
+module_param(default_cpu, byte, S_IRUGO);
+MODULE_PARM_DESC(default_cpu, "Set default CPU for non RSS frames");
+
+module_param(cos_classifer, byte, S_IRUGO);
+MODULE_PARM_DESC(cos_classifer,
+ "Cos Classifier (vlan_pri=0, dscp=1, vlan_dscp=2, dscp_vlan=3)");
+
+module_param(pri_map, uint, S_IRUGO);
+MODULE_PARM_DESC(pri_map, "Set priority_map, nibble for each cos.");
+
+module_param(default_cos, byte, S_IRUGO);
+MODULE_PARM_DESC(default_cos,
+ "Set default cos value(0-7) for unclassified traffic");
+
+module_param(rx_queue_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_queue_size, "Rx queue size");
+
+module_param(tx_queue_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(tx_queue_size, "Tx queue size");
+
+module_param(buffer_scaling, ushort, S_IRUGO);
+MODULE_PARM_DESC(buffer_scaling, "Buffer scaling (TBD)");
+
+module_param(uc_filter_max, byte, S_IRUGO);
+MODULE_PARM_DESC(uc_filter_max,
+ "Set unicast filter max size, it is multiple of 4. def=4");
+
+module_param(debug_param, uint, S_IRUGO);
+MODULE_PARM_DESC(debug_param,
+ "Ad-hoc parameter, which can be used for various debug operations.");
+
+module_param(stats_delay_msec, ushort, S_IRUGO);
+MODULE_PARM_DESC(stats_delay_msec, "Set statistic delay in msec, def=250");
+
+module_param_named(short_pool, mv_pp2x_pools[MVPP2_BM_SWF_SHORT_POOL].buf_num, uint, S_IRUGO);
+MODULE_PARM_DESC(short_pool, "Short pool size (0-8192), def=2048");
+
+module_param_named(long_pool, mv_pp2x_pools[MVPP2_BM_SWF_LONG_POOL].buf_num, uint, S_IRUGO);
+MODULE_PARM_DESC(long_pool, "Long pool size (0-8192), def=1024");
+
+module_param_named(jumbo_pool, mv_pp2x_pools[MVPP2_BM_SWF_JUMBO_POOL].buf_num, uint, S_IRUGO);
+MODULE_PARM_DESC(jumbo_pool, "Jumbo pool size (0-8192), def=512");
+
+/*TODO: Below module_params will not go to ML. Created for testing. */
+
+module_param(port_cpu_bind_map, uint, S_IRUGO);
+MODULE_PARM_DESC(port_cpu_bind_map,
+ "Set default port-to-cpu binding, nibble for each port. Relevant when queue_mode=multi-mode & rss is disabled");
+
+module_param(first_bm_pool, byte, S_IRUGO);
+MODULE_PARM_DESC(first_bm_pool, "First used buffer pool (0-11)");
+
+module_param(first_addr_space, byte, S_IRUGO);
+MODULE_PARM_DESC(first_addr_space, "First used PPV22 address space (0-8)");
+
+module_param(first_log_rxq_queue, byte, S_IRUGO);
+MODULE_PARM_DESC(first_log_rxq_queue, "First logical rx_queue (0-31)");
+
+/* Record the controller's physical register window on the net_device
+ * (dev->mem_start/mem_end), taken from the probed address range.
+ */
+void set_device_base_address(struct net_device *dev)
+{
+ struct mv_pp2x_port *port = netdev_priv(dev);
+
+ dev->mem_start = (unsigned long)port->priv->hw.phys_addr_start;
+ dev->mem_end = (unsigned long)port->priv->hw.phys_addr_end;
+}
+
+/* Number of RXQs used by single port */
+static int mv_pp2x_rxq_number;
+/* Number of TXQs used by single port */
+static int mv_pp2x_txq_number;
+
+static inline int mv_pp2x_txq_count(struct mv_pp2x_txq_pcpu *txq_pcpu)
+{
+ int index_modulo = (txq_pcpu->txq_put_index - txq_pcpu->txq_get_index +
+ txq_pcpu->size) % txq_pcpu->size;
+
+ return index_modulo;
+}
+
+static inline int mv_pp2x_txq_free_count(struct mv_pp2x_txq_pcpu *txq_pcpu)
+{
+ int index_modulo = (txq_pcpu->txq_get_index - txq_pcpu->txq_put_index +
+ txq_pcpu->size) % txq_pcpu->size;
+
+ if (index_modulo == 0)
+ return txq_pcpu->size;
+
+ return index_modulo;
+}
+
+static void mv_pp2x_txq_inc_get(struct mv_pp2x_txq_pcpu *txq_pcpu)
+{
+ if (txq_pcpu->txq_get_index == txq_pcpu->size - 1)
+ txq_pcpu->txq_get_index = 0;
+ else
+ txq_pcpu->txq_get_index++;
+}
+
+void mv_pp2x_txq_inc_error(struct mv_pp2x_txq_pcpu *txq_pcpu, int num)
+{
+ for (; num > 0; num--) {
+ if (txq_pcpu->txq_put_index < 1)
+ txq_pcpu->txq_put_index = txq_pcpu->size - 1;
+ else
+ txq_pcpu->txq_put_index--;
+ txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = 0;
+ txq_pcpu->data_size[txq_pcpu->txq_put_index] = 0;
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] = 0;
+ }
+}
+
+/* Record an in-flight transmit in the per-CPU shadow ring (skb, data
+ * size and descriptor DMA address, indexed by the producer slot) and
+ * advance the producer index with wrap-around.
+ */
+void mv_pp2x_txq_inc_put(enum mvppv2_version pp2_ver,
+ struct mv_pp2x_txq_pcpu *txq_pcpu,
+ struct sk_buff *skb,
+ struct mv_pp2x_tx_desc *tx_desc)
+{
+ txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ txq_pcpu->data_size[txq_pcpu->txq_put_index] = tx_desc->data_size;
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ mv_pp2x_txdesc_phys_addr_get(pp2_ver, tx_desc);
+ if (txq_pcpu->txq_put_index == txq_pcpu->size - 1)
+ txq_pcpu->txq_put_index = 0;
+ else
+ txq_pcpu->txq_put_index++;
+#if defined(__BIG_ENDIAN)
+ /* On big-endian hosts the descriptor is byte-swapped with the
+ * version-specific helper — presumably the HW expects LE layout;
+ * done last, after all descriptor fields have been read.
+ */
+ if (pp2_ver == PPV21)
+ mv_pp21_tx_desc_swap(tx_desc);
+ else
+ mv_pp22_tx_desc_swap(tx_desc);
+#endif /* __BIG_ENDIAN */
+}
+
+static void mv_pp2x_txq_dec_put(struct mv_pp2x_txq_pcpu *txq_pcpu)
+{
+ if (txq_pcpu->txq_put_index == 0)
+ txq_pcpu->txq_put_index = txq_pcpu->size - 1;
+ else
+ txq_pcpu->txq_put_index--;
+}
+
+/* First HW buffer-pool id reserved for this driver instance. */
+static u8 mv_pp2x_first_pool_get(struct mv_pp2x *priv)
+{
+ return priv->pp2_cfg.first_bm_pool;
+}
+
+/* Configured packet size for a logical pool (short/long/jumbo). */
+static int mv_pp2x_pool_pkt_size_get(enum mv_pp2x_bm_pool_log_num log_id)
+{
+ return mv_pp2x_pools[log_id].pkt_size;
+}
+
+/* Configured buffer count for a logical pool (module-parameter driven). */
+static int mv_pp2x_pool_buf_num_get(enum mv_pp2x_bm_pool_log_num log_id)
+{
+ return mv_pp2x_pools[log_id].buf_num;
+}
+
+/* Human-readable name of a logical pool ("short"/"long"/"jumbo"). */
+char *mv_pp2x_pool_description_get(enum mv_pp2x_bm_pool_log_num log_id)
+{
+ return mv_pp2x_pools[log_id].description;
+}
+EXPORT_SYMBOL(mv_pp2x_pool_description_get);
+
+/* Buffer Manager configuration routines */
+static void *mv_pp2x_frag_alloc(const struct mv_pp2x_bm_pool *pool)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(pool->frag_size);
+ else
+ return kmalloc(pool->frag_size, GFP_ATOMIC);
+}
+
+static void mv_pp2x_frag_free(const struct mv_pp2x_bm_pool *pool, void *data)
+{
+ if (likely(pool->frag_size <= PAGE_SIZE))
+ skb_free_frag(data);
+ else
+ kfree(data);
+}
+
+/* Allocate one buffer, DMA-map it for receive and hand it to the HW
+ * pool. Returns 0 on success, -ENOMEM on allocation or mapping failure.
+ * NOTE(review): the 'is_recycle' argument is unused in this body —
+ * confirm whether a recycle path elsewhere still needs it.
+ */
+static int mv_pp2x_rx_refill_new(struct mv_pp2x_port *port,
+ struct mv_pp2x_bm_pool *bm_pool,
+ u32 pool, int is_recycle, int cpu)
+{
+ dma_addr_t phys_addr;
+ void *data;
+
+ data = mv_pp2x_frag_alloc(bm_pool);
+ if (!data)
+ return -ENOMEM;
+
+ phys_addr = dma_map_single(port->dev->dev.parent, data,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+ mv_pp2x_frag_free(bm_pool, data);
+ return -ENOMEM;
+ }
+
+ mv_pp2x_pool_refill(port->priv, pool, phys_addr, cpu);
+ return 0;
+}
+
+/* Create pool */
+/* Allocate the pool's pointer ring (two pointers per buffer) and
+ * register it with the BM HW. Returns 0 on success, -EINVAL for a
+ * misaligned size, -ENOMEM on allocation or alignment failure.
+ */
+static int mv_pp2x_bm_pool_create(struct device *dev,
+ struct mv_pp2x_hw *hw,
+ struct mv_pp2x_bm_pool *bm_pool,
+ int size, int pkt_size)
+{
+ int size_bytes;
+
+ /* Driver enforces size= x16 both for PPv21 and for PPv22, even though
+ * PPv22 HW allows size= x8
+ */
+ if (!IS_ALIGNED(size, (1 << MVPP21_BM_POOL_SIZE_OFFSET)))
+ return -EINVAL;
+
+ /*YuvalC: Two pointers per buffer, existing bug fixed. */
+ size_bytes = 2 * sizeof(uintptr_t) * size;
+ bm_pool->virt_addr = dma_alloc_coherent(dev, size_bytes,
+ &bm_pool->phys_addr,
+ GFP_KERNEL);
+ if (!bm_pool->virt_addr)
+ return -ENOMEM;
+
+ if (!IS_ALIGNED((uintptr_t)bm_pool->virt_addr,
+ MVPP2_BM_POOL_PTR_ALIGN)) {
+ dma_free_coherent(dev, size_bytes, bm_pool->virt_addr,
+ bm_pool->phys_addr);
+ dev_err(dev, "BM pool %d is not %d bytes aligned\n",
+ bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+ return -ENOMEM;
+ }
+
+ mv_pp2x_bm_hw_pool_create(hw, bm_pool->id, bm_pool->phys_addr, size);
+
+ bm_pool->size = size;
+ bm_pool->pkt_size = pkt_size;
+ /* frag_size covers the RX buffer plus skb_shared_info tail room */
+ bm_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(
+ bm_pool->pkt_size)) + MVPP2_SKB_SHINFO_SIZE;
+ bm_pool->buf_num = 0;
+ mv_pp2x_bm_pool_bufsize_set(hw, bm_pool,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size));
+
+ return 0;
+}
+
+void mv_pp2x_bm_bufs_free(struct device *dev, struct mv_pp2x *priv,
+ struct mv_pp2x_bm_pool *bm_pool, int buf_num)
+{
+ int i;
+
+ if (buf_num > bm_pool->buf_num) {
+ WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
+ bm_pool->id, buf_num);
+ buf_num = bm_pool->buf_num;
+ }
+ for (i = 0; i < buf_num; i++) {
+ u8 *virt_addr;
+ dma_addr_t phys_addr;
+
+ /* Get buffer virtual address (indirect access) */
+ phys_addr = mv_pp2x_bm_phys_addr_get(&priv->hw, bm_pool->id);
+ if (!phys_addr)
+ break;
+ if (!bm_pool->external_pool) {
+ dma_unmap_single(dev, phys_addr,
+ MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), DMA_TO_DEVICE);
+ virt_addr = phys_to_virt(dma_to_phys(dev, phys_addr));
+ mv_pp2x_frag_free(bm_pool, virt_addr);
+ }
+ }
+
+ /* Update BM driver with number of buffers removed from pool */
+ bm_pool->buf_num -= i;
+}
+
+/* Cleanup pool */
+int mv_pp2x_bm_pool_destroy(struct device *dev, struct mv_pp2x *priv,
+ struct mv_pp2x_bm_pool *bm_pool)
+{
+ u32 val;
+ int size_bytes, buf_num;
+
+ buf_num = mv_pp2x_check_hw_buf_num(priv, bm_pool);
+
+ mv_pp2x_bm_bufs_free(dev, priv, bm_pool, buf_num);
+
+ /* Check buffer counters after free */
+ buf_num = mv_pp2x_check_hw_buf_num(priv, bm_pool);
+
+ if (buf_num) {
+ WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
+ bm_pool->id,
+ bm_pool->buf_num);
+ return 0;
+ }
+
+ val = mv_pp2x_read(&priv->hw, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+ val |= MVPP2_BM_STOP_MASK;
+ mv_pp2x_write(&priv->hw, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+ size_bytes = 2 * sizeof(uintptr_t) * bm_pool->size;
+ dma_free_coherent(dev, size_bytes, bm_pool->virt_addr,
+ bm_pool->phys_addr);
+ mv_pp2x_bm_pool_bufsize_set(&priv->hw, bm_pool, 0);
+ priv->num_pools--;
+ return 0;
+}
+
+int mv_pp2x_bm_pool_ext_add(struct device *dev, struct mv_pp2x *priv,
+ u32 *pool_num, u32 pkt_size)
+{
+ int err, size, enabled;
+ u8 first_pool = mv_pp2x_first_pool_get(priv);
+ u32 pool = priv->num_pools;
+ struct mv_pp2x_bm_pool *bm_pool;
+ struct mv_pp2x_hw *hw = &priv->hw;
+
+ if ((priv->num_pools + 1) > MVPP2_BM_POOLS_MAX_ALLOC_NUM) {
+ dev_err(dev, "Unable to add pool. Max BM pool alloc reached %d\n",
+ priv->num_pools + 1);
+ return -ENOMEM;
+ }
+
+ /* Check if pool is already active. Ignore request */
+ enabled = mv_pp2x_read(hw, MVPP2_BM_POOL_CTRL_REG(pool)) &
+ MVPP2_BM_STATE_MASK;
+
+ if (enabled) {
+ dev_info(dev, "%s pool %d already enabled. Ignoring request\n",
+ __func__, pool);
+ return 0;
+ }
+
+ /* Mask BM interrupts */
+ mv_pp2x_write(&priv->hw, MVPP2_BM_INTR_MASK_REG(first_pool +
+ pool), 0);
+ /* Clear BM cause register */
+ mv_pp2x_write(&priv->hw, MVPP2_BM_INTR_CAUSE_REG(first_pool +
+ pool), 0);
+
+ /* Create all pools with maximum size */
+ size = MVPP2_BM_POOL_SIZE_MAX;
+ bm_pool = &priv->bm_pools[pool];
+ bm_pool->log_id = pool;
+ bm_pool->id = first_pool + pool;
+ bm_pool->external_pool = true;
+ err = mv_pp2x_bm_pool_create(dev, hw, bm_pool, size, pkt_size);
+ if (err)
+ return err;
+
+ *pool_num = pool;
+ priv->num_pools++;
+ return 0;
+}
+
+/* Create the fixed set of software-forwarding pools (short/long/jumbo),
+ * unrolling already-created pools on failure.
+ */
+static int mv_pp2x_bm_pools_init(struct platform_device *pdev,
+ struct mv_pp2x *priv,
+ u8 first_pool, u8 num_pools)
+{
+ int i, err, size;
+ struct mv_pp2x_bm_pool *bm_pool;
+ struct mv_pp2x_hw *hw = &priv->hw;
+
+ /* Create all pools with maximum size */
+ size = MVPP2_BM_POOL_SIZE_MAX;
+ for (i = 0; i < num_pools; i++) {
+ bm_pool = &priv->bm_pools[i];
+ bm_pool->log_id = i;
+ bm_pool->id = first_pool + i;
+ bm_pool->external_pool = false;
+ err = mv_pp2x_bm_pool_create(&pdev->dev, hw, bm_pool, size,
+ mv_pp2x_pool_pkt_size_get(bm_pool->log_id));
+ if (err)
+ goto err_unroll_pools;
+ }
+ priv->num_pools = num_pools;
+ return 0;
+
+err_unroll_pools:
+ dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+ for (i = i - 1; i >= 0; i--)
+ mv_pp2x_bm_pool_destroy(&pdev->dev, priv, &priv->bm_pools[i]);
+ return err;
+}
+
+static int mv_pp2x_bm_init(struct platform_device *pdev, struct mv_pp2x *priv)
+{
+ int i, err;
+ u8 first_pool = mv_pp2x_first_pool_get(priv);
+ u8 num_pools = MVPP2_BM_SWF_NUM_POOLS;
+
+ for (i = first_pool; i < (first_pool + num_pools); i++) {
+ /* Mask BM all interrupts */
+ mv_pp2x_write(&priv->hw, MVPP2_BM_INTR_MASK_REG(i), 0);
+ /* Clear BM cause register */
+ mv_pp2x_write(&priv->hw, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+ }
+
+ /* Allocate and initialize BM pools */
+ priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+ sizeof(struct mv_pp2x_bm_pool),
+ GFP_KERNEL);
+ if (!priv->bm_pools)
+ return -ENOMEM;
+
+ err = mv_pp2x_bm_pools_init(pdev, priv, first_pool, num_pools);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+/* Allocate buffers for the pool */
+int mv_pp2x_bm_bufs_add(struct mv_pp2x_port *port,
+ struct mv_pp2x_bm_pool *bm_pool, int buf_num)
+{
+ int i, buf_size, total_size, cpu;
+
+ buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+ total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+ if (buf_num < 0 ||
+ (buf_num + bm_pool->buf_num > bm_pool->size)) {
+ netdev_err(port->dev,
+ "cannot allocate %d buffers for pool %d\n",
+ buf_num, bm_pool->id);
+ return 0;
+ }
+
+ cpu = get_cpu();
+ for (i = 0; i < buf_num; i++)
+ mv_pp2x_rx_refill_new(port, bm_pool, (u32)bm_pool->id, 0, cpu);
+ put_cpu();
+
+ /* Update BM driver with number of buffers added to pool */
+ bm_pool->buf_num += i;
+ bm_pool->in_use_thresh = bm_pool->buf_num / 4;
+
+ netdev_dbg(port->dev,
+ "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+ mv_pp2x_pool_description_get(bm_pool->log_id),
+ bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+ netdev_dbg(port->dev,
+ "%s pool %d: %d of %d buffers added\n",
+ mv_pp2x_pool_description_get(bm_pool->log_id),
+ bm_pool->id, i, buf_num);
+ return i;
+}
+
+static int mv_pp2x_bm_buf_calc(enum mv_pp2x_bm_pool_log_num log_pool,
+ u32 port_map)
+{
+ /*TODO: Code algo based on
+ * port_map/num_rx_queues/num_tx_queues/queue_sizes
+ */
+ int num_ports = hweight_long(port_map);
+
+ return(num_ports * mv_pp2x_pool_buf_num_get(log_pool));
+}
+
+/* Notify the driver that BM pool is being used as specific type and return the
+ * pool pointer on success
+ */
+
+/* Resize 'log_pool' for the port set obtained by adding (add_port=true)
+ * or removing (add_port=false) this port, growing or shrinking the
+ * buffer population to match. Returns the pool, or NULL on error.
+ * NOTE: pool->port_map itself is updated by the caller, not here.
+ */
+static struct mv_pp2x_bm_pool *mv_pp2x_bm_pool_use_internal(
+ struct mv_pp2x_port *port, enum mv_pp2x_bm_pool_log_num log_pool,
+ bool add_port)
+{
+ int pkts_num, add_num, num;
+ struct mv_pp2x_bm_pool *pool = &port->priv->bm_pools[log_pool];
+
+ if (log_pool < MVPP2_BM_SWF_SHORT_POOL ||
+ log_pool > MVPP2_BM_SWF_JUMBO_POOL) {
+ netdev_err(port->dev, "pool does not exist\n");
+ return NULL;
+ }
+
+ /* Target population for the prospective port set */
+ if (add_port) {
+ pkts_num = mv_pp2x_bm_buf_calc(log_pool,
+ pool->port_map |
+ (1 << port->id));
+ } else {
+ pkts_num = mv_pp2x_bm_buf_calc(log_pool,
+ pool->port_map &
+ ~(1 << port->id));
+ }
+
+ add_num = pkts_num - pool->buf_num;
+
+ /* Allocate buffers for this pool */
+ if (add_num > 0) {
+ num = mv_pp2x_bm_bufs_add(port, pool, add_num);
+ if (num != add_num) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ pool->id, num, pkts_num);
+ /* We need to undo the bufs_add() allocations */
+ return NULL;
+ }
+ } else if (add_num < 0) {
+ /* Pool is now over-provisioned: release the surplus */
+ mv_pp2x_bm_bufs_free(port->dev->dev.parent, port->priv, pool, -add_num);
+ }
+
+ return pool;
+}
+
+static struct mv_pp2x_bm_pool *mv_pp2x_bm_pool_use(
+ struct mv_pp2x_port *port,
+ enum mv_pp2x_bm_pool_log_num log_pool)
+{
+ return mv_pp2x_bm_pool_use_internal(port, log_pool, true);
+}
+
+static struct mv_pp2x_bm_pool *mv_pp2x_bm_pool_stop_use(
+ struct mv_pp2x_port *port,
+ enum mv_pp2x_bm_pool_log_num log_pool)
+{
+ return mv_pp2x_bm_pool_use_internal(port, log_pool, false);
+}
+
+/* Bind one RXQ to a long and a short HW buffer pool via the
+ * version-specific callbacks.
+ * NOTE(review): returns -ENOMEM for an out-of-range rxq — arguably
+ * -EINVAL, but kept as callers may compare against it.
+ */
+int mv_pp2x_swf_bm_pool_assign(struct mv_pp2x_port *port, u32 rxq,
+ u32 long_id, u32 short_id)
+{
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ if (rxq >= port->num_rx_queues)
+ return -ENOMEM;
+
+ port->priv->pp2xdata->mv_pp2x_rxq_long_pool_set(hw,
+ port->rxqs[rxq]->id, long_id);
+ port->priv->pp2xdata->mv_pp2x_rxq_short_pool_set(hw,
+ port->rxqs[rxq]->id, short_id);
+ return 0;
+}
+
+/* Initialize pools for swf */
+static int mv_pp2x_swf_bm_pool_init(struct mv_pp2x_port *port)
+{
+ int rxq;
+ enum mv_pp2x_bm_pool_log_num long_log_pool;
+ struct mv_pp2x_hw *hw = &port->priv->hw;
+
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ long_log_pool = MVPP2_BM_SWF_JUMBO_POOL;
+ else
+ long_log_pool = MVPP2_BM_SWF_LONG_POOL;
+
+ if (!port->pool_long) {
+ port->pool_long =
+ mv_pp2x_bm_pool_use(port, long_log_pool);
+ if (!port->pool_long)
+ return -ENOMEM;
+ port->pool_long->port_map |= (1 << port->id);
+
+ for (rxq = 0; rxq < port->num_rx_queues; rxq++) {
+ port->priv->pp2xdata->mv_pp2x_rxq_long_pool_set(hw,
+ port->rxqs[rxq]->id, port->pool_long->id);
+ }
+ }
+
+ if (!port->pool_short) {
+ port->pool_short =
+ mv_pp2x_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL);
+ if (!po