Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 32
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 13
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 96
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 137
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 30
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c | 86
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 12
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 6
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_stats.c | 11
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/r8169_phy_config.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 38
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 17
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 61
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 51
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 6
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 4
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 41
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.h | 5
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 2
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c | 5
69 files changed, 570 insertions(+), 383 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 16c490692f42..12083b9679b5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -672,6 +672,18 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
+static struct sk_buff *
+bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
+{
+ struct sk_buff *skb;
+
+ if (fp->rx_frag_size)
+ skb = build_skb(data, fp->rx_frag_size);
+ else
+ skb = slab_build_skb(data);
+ return skb;
+}
+
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
@@ -779,7 +791,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_data))
- skb = build_skb(data, fp->rx_frag_size);
+ skb = bnx2x_build_skb(fp, data);
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
@@ -1046,7 +1058,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
- skb = build_skb(data, fp->rx_frag_size);
+ skb = bnx2x_build_skb(fp, data);
if (unlikely(!skb)) {
bnx2x_frag_free(fp, data);
bnx2x_fp_qstats(bp, fp)->
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e2e2c986c82b..c23e3b397bcf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -175,12 +175,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
- { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c0628ac1b798..5928430f6f51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1226,6 +1226,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
+#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
u16 support_speeds;
u16 support_pam4_speeds;
u16 auto_link_speeds; /* fw adv setting */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index ec573127b707..6bd18eb5137f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1714,6 +1714,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
return SPEED_100000;
+ case BNXT_LINK_SPEED_200GB:
+ return SPEED_200000;
default:
return SPEED_UNKNOWN;
}
@@ -3738,6 +3740,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_ulp_stop(bp);
rc = bnxt_close_nic(bp, true, false);
if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
bnxt_ulp_start(bp, rc);
return;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bca68edfbe9c..da9d4b310fcd 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -370,8 +370,7 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
};
static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
- struct ethtool_rmon_stats *s,
- const struct ethtool_rmon_hist_range **ranges)
+ struct ethtool_rmon_stats *s)
{
s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
@@ -393,8 +392,6 @@ static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
-
- *ranges = enetc_rmon_ranges;
}
static void enetc_get_eth_mac_stats(struct net_device *ndev,
@@ -447,13 +444,15 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
struct enetc_hw *hw = &priv->si->hw;
struct enetc_si *si = priv->si;
+ *ranges = enetc_rmon_ranges;
+
switch (rmon_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
- enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ enetc_rmon_stats(hw, 0, rmon_stats);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (si->hw_features & ENETC_SI_F_QBU)
- enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+ enetc_rmon_stats(hw, 1, rmon_stats);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
ethtool_aggregate_rmon_stats(ndev, rmon_stats);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ba1e0d71c68..9939ccafb556 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -507,6 +507,11 @@ struct bufdesc_ex {
/* i.MX6Q adds pm_qos support */
#define FEC_QUIRK_HAS_PMQOS BIT(23)
+/* Not all FEC hardware block MDIOs support accesses in C45 mode.
+ * Older blocks in the ColdFire parts do not support it.
+ */
+#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f3b16a6673e2..160c1b3525f5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,18 +100,19 @@ struct fec_devinfo {
static const struct fec_devinfo fec_imx25_info = {
.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx27_info = {
- .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
- FEC_QUIRK_NO_HARD_RESET,
+ FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6q_info = {
@@ -119,11 +120,12 @@ static const struct fec_devinfo fec_imx6q_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
- FEC_QUIRK_HAS_PMQOS,
+ FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_mvf600_info = {
- .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6x_info = {
@@ -132,7 +134,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
- FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -140,7 +143,8 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8mq_info = {
@@ -150,7 +154,8 @@ static const struct fec_devinfo fec_imx8mq_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+ FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8qm_info = {
@@ -160,14 +165,15 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_s32v234_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static struct platform_device_id fec_devtype[] = {
@@ -2434,8 +2440,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->name = "fec_enet_mii_bus";
fep->mii_bus->read = fec_enet_mdio_read_c22;
fep->mii_bus->write = fec_enet_mdio_write_c22;
- fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
- fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+ if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
+ fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+ fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+ }
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 64eb0442c82f..005cb9dfe078 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -47,6 +47,8 @@
#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index ce574d097e28..5f81470843b4 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -537,7 +537,10 @@ static int gve_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- int err = gve_adminq_report_link_speed(priv);
+ int err = 0;
+
+ if (priv->link_speed == 0)
+ err = gve_adminq_report_link_speed(priv);
cmd->base.speed = priv->link_speed;
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4888bf05fbed..5e11b8236754 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -284,8 +284,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
int bytes;
int hlen;
- hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
- tcp_hdrlen(skb) : skb_headlen(skb);
+ hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
+ min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
hlen);
@@ -454,13 +454,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
pkt_desc = &tx->desc[idx];
l4_hdr_offset = skb_checksum_start_offset(skb);
- /* If the skb is gso, then we want the tcp header in the first segment
- * otherwise we want the linear portion of the skb (which will contain
- * the checksum because skb->csum_start and skb->csum_offset are given
- * relative to skb->head) in the first segment.
+ /* If the skb is gso, then we want the tcp header alone in the first segment
+ * otherwise we want the minimum required by the gVNIC spec.
*/
hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
- skb_headlen(skb);
+ min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
info->skb = skb;
/* We don't want to split the header, so if necessary, pad to the end
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 5b3519c6e362..97fe1787a8f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -44,7 +44,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
return 0;
}
-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1,
I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -78,27 +78,28 @@ int i40e_diag_reg_test(struct i40e_hw *hw)
{
int ret_code = 0;
u32 reg, mask;
+ u32 elements;
u32 i, j;
for (i = 0; i40e_reg_list[i].offset != 0 &&
!ret_code; i++) {
+ elements = i40e_reg_list[i].elements;
/* set actual reg range for dynamically allocated resources */
if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
hw->func_caps.num_tx_qp != 0)
- i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ elements = hw->func_caps.num_tx_qp;
if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
hw->func_caps.num_msix_vectors != 0)
- i40e_reg_list[i].elements =
- hw->func_caps.num_msix_vectors - 1;
+ elements = hw->func_caps.num_msix_vectors - 1;
/* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
+ for (j = 0; j < elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index e641035c7297..c3ce5f35211f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
u32 stride; /* bytes between each element */
};
-extern struct i40e_diag_reg_test_info i40e_reg_list[];
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];
int i40e_diag_reg_test(struct i40e_hw *hw);
int i40e_diag_eeprom_test(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 924f972b91fa..72b091f2509d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -171,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
struct i40e_fdir_filter *data)
{
bool is_vlan = !!data->vlan_tag;
- struct vlan_hdr vlan;
- struct ipv6hdr ipv6;
- struct ethhdr eth;
- struct iphdr ip;
+ struct vlan_hdr vlan = {};
+ struct ipv6hdr ipv6 = {};
+ struct ethhdr eth = {};
+ struct iphdr ip = {};
u8 *tmp;
if (ipv4) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 16c490965b61..dd11dbbd5551 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
/* Non Tunneled IPv6 */
IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
IAVF_PTT_UNUSED_ENTRY(91),
IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 3273aeb8fa67..095201e83c9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -893,6 +893,10 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ /* Do not track VLAN 0 filter, always added by the PF on VF init */
+ if (!vid)
+ return 0;
+
if (!VLAN_FILTERING_ALLOWED(adapter))
return -EIO;
@@ -919,6 +923,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ /* We do not track VLAN 0 filter */
+ if (!vid)
+ return 0;
+
iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
if (proto == cpu_to_be16(ETH_P_8021Q))
clear_bit(vid, adapter->vsi.active_cvlans);
@@ -5066,6 +5074,11 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_unlock(&adapter->crit_lock);
break;
}
+ /* Simply return if we already went through iavf_shutdown */
+ if (adapter->state == __IAVF_REMOVE) {
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
mutex_unlock(&adapter->crit_lock);
usleep_range(500, 1000);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 18b6a702a1d6..e989feda133c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1096,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 6d23338604bb..4e17d006c52d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2446,8 +2446,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
if (f->is_new_vlan) {
f->is_new_vlan = false;
- if (!f->vlan.vid)
- continue;
if (f->vlan.tpid == ETH_P_8021Q)
set_bit(f->vlan.vid,
adapter->vsi.active_cvlans);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0f52ea38b6f3..450317dfcca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -291,6 +291,7 @@ static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
struct ice_vsi_ctx *ctxt;
int status;
+ ice_fltr_remove_all(vsi);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
@@ -2892,7 +2893,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
- ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c233464b8f6b..0d8b8c6f9bd3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4641,6 +4641,12 @@ static int ice_start_eth(struct ice_vsi *vsi)
return err;
}
+static void ice_stop_eth(struct ice_vsi *vsi)
+{
+ ice_fltr_remove_all(vsi);
+ ice_vsi_close(vsi);
+}
+
static int ice_init_eth(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -5129,7 +5135,7 @@ void ice_unload(struct ice_pf *pf)
{
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- ice_vsi_close(ice_get_main_vsi(pf));
+ ice_stop_eth(ice_get_main_vsi(pf));
ice_vsi_decfg(ice_get_main_vsi(pf));
ice_deinit_dev(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 4eca8d195ef0..b7682de0ae05 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -2788,7 +2788,7 @@ static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
- struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+ struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
struct ice_hw *hw = pi->hw;
int status = 0;
@@ -2806,11 +2806,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
if (old_agg_info && old_agg_info != agg_info) {
struct ice_sched_agg_vsi_info *vtmp;
- list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+ list_for_each_entry_safe(iter, vtmp,
&old_agg_info->agg_vsi_list,
list_entry)
- if (old_agg_vsi_info->vsi_handle == vsi_handle)
+ if (iter->vsi_handle == vsi_handle) {
+ old_agg_vsi_info = iter;
break;
+ }
}
/* check if entry already exist */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 96a64c25e2ef..0cc05e54a781 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1341,15 +1341,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
if (ice_is_eswitch_mode_switchdev(pf)) {
dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
return -EOPNOTSUPP;
}
- vf = ice_get_vf_by_id(pf, vf_id);
- if (!vf)
- return -EINVAL;
-
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 61f844d22512..46b36851af46 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
- struct ice_vsi_ctx *ctx;
+ struct ice_vsi_ctx *ctx, *cached_ctx;
+ int status;
+
+ cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!cached_ctx)
+ return -ENOENT;
- ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -EIO;
+ return -ENOMEM;
+
+ ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
+ ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
+ ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
if (enable)
ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
- return ice_update_vsi(hw, vsi_handle, ctx, NULL);
+ status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
+ if (!status) {
+ cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
+ cached_ctx->info.valid_sections |= ctx->info.valid_sections;
+ }
+
+ kfree(ctx);
+ return status;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dfd22862e926..4fcf2d07eb85 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -938,6 +938,7 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
+ * @ntc: index of next to clean element
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
@@ -1026,7 +1027,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
@@ -1210,6 +1210,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
if (++ntc == cnt)
ntc = 0;
+ rx_ring->first_desc = ntc;
continue;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7bc5aa340c7d..c8322fb6f2b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -438,6 +438,7 @@ busy:
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
* @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
+ * @first_idx: index to write from caller
*
* This function bumps XDP Tx tail and/or flush redirect map, and
* should be called when a batch of packets has been processed in the
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index e6ef6b303222..daa6a1e894cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -542,6 +542,87 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
}
/**
+ * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
+ * @fdir: pointer to the VF FDIR structure
+ */
+static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+{
+ enum ice_fltr_ptype flow;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+ flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
+}
+
+/**
+ * ice_vc_fdir_has_prof_conflict
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ *
+ * Check if @conf has conflicting profile with existing profiles
+ *
+ * Return: true on success, and false on error.
+ */
+static bool
+ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct ice_fdir_fltr *desc;
+
+ list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+ struct virtchnl_fdir_fltr_conf *existing_conf;
+ enum ice_fltr_ptype flow_type_a, flow_type_b;
+ struct ice_fdir_fltr *a, *b;
+
+ existing_conf = to_fltr_conf_from_desc(desc);
+ a = &existing_conf->input;
+ b = &conf->input;
+ flow_type_a = a->flow_type;
+ flow_type_b = b->flow_type;
+
+ /* No need to compare two rules with different tunnel types or
+ * with the same protocol type.
+ */
+ if (existing_conf->ttype != conf->ttype ||
+ flow_type_a == flow_type_b)
+ continue;
+
+ switch (flow_type_a) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+/**
* ice_vc_fdir_write_flow_prof
* @vf: pointer to the VF structure
* @flow: filter flow type
@@ -677,6 +758,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
enum ice_fltr_ptype flow;
int ret;
+ ret = ice_vc_fdir_has_prof_conflict(vf, conf);
+ if (ret) {
+ dev_dbg(dev, "Found flow profile conflict for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
flow = input->flow_type;
ret = ice_vc_fdir_alloc_prof(vf, flow);
if (ret) {
@@ -1798,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
- goto err_free_conf;
+ goto err_rem_entry;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
@@ -1807,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
vf->vf_id, ret);
- goto err_rem_entry;
+ goto err_clr_irq;
}
exit:
kfree(stat);
return ret;
-err_rem_entry:
+err_clr_irq:
ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
devm_kfree(dev, conf);
@@ -1924,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
spin_lock_init(&fdir->ctx_lock);
fdir->ctx_irq.flags = 0;
fdir->ctx_done.flags = 0;
+ ice_vc_fdir_reset_cnt_all(fdir);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 03bc1e8af575..274c781b5547 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -109,6 +109,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
+static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
@@ -175,9 +176,7 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
-static int igb_disable_sriov(struct pci_dev *dev);
-static int igb_pci_disable_sriov(struct pci_dev *dev);
+static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
static int igb_suspend(struct device *);
@@ -3665,7 +3664,7 @@ err_sw_init:
kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- igb_disable_sriov(pdev);
+ igb_disable_sriov(pdev, false);
#endif
pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
@@ -3679,7 +3678,38 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+ else
+ igb_reset(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3713,10 +3743,10 @@ static int igb_disable_sriov(struct pci_dev *pdev)
adapter->flags |= IGB_FLAG_DMAC;
}
- return 0;
+ return reinit ? igb_sriov_reinit(pdev) : 0;
}
-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3781,12 +3811,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
"Unable to allocate memory for VF MAC filter list\n");
}
- /* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs) {
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
- }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -3794,6 +3818,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* DMA Coalescing is not supported in IOV mode. */
adapter->flags &= ~IGB_FLAG_DMAC;
+
+ if (reinit) {
+ err = igb_sriov_reinit(pdev);
+ if (err)
+ goto err_out;
+ }
+
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs)
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+
goto out;
err_out:
@@ -3863,9 +3898,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
- rtnl_lock();
- igb_disable_sriov(pdev);
- rtnl_unlock();
+ igb_disable_sriov(pdev, false);
#endif
unregister_netdev(netdev);
@@ -3911,7 +3944,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
igb_reset_interrupt_capability(adapter);
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs, false);
#endif /* CONFIG_PCI_IOV */
}
@@ -9520,71 +9553,17 @@ static void igb_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PCI_IOV
-static int igb_sriov_reinit(struct pci_dev *dev)
-{
- struct net_device *netdev = pci_get_drvdata(dev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
-
- rtnl_lock();
-
- if (netif_running(netdev))
- igb_close(netdev);
- else
- igb_reset(adapter);
-
- igb_clear_interrupt_scheme(adapter);
-
- igb_init_queue_configuration(adapter);
-
- if (igb_init_interrupt_scheme(adapter, true)) {
- rtnl_unlock();
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- return -ENOMEM;
- }
-
- if (netif_running(netdev))
- igb_open(netdev);
-
- rtnl_unlock();
-
- return 0;
-}
-
-static int igb_pci_disable_sriov(struct pci_dev *dev)
-{
- int err = igb_disable_sriov(dev);
-
- if (!err)
- err = igb_sriov_reinit(dev);
-
- return err;
-}
-
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
-{
- int err = igb_enable_sriov(dev, num_vfs);
-
- if (err)
- goto out;
-
- err = igb_sriov_reinit(dev);
- if (!err)
- return num_vfs;
-
-out:
- return err;
-}
-
-#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
- if (num_vfs == 0)
- return igb_pci_disable_sriov(dev);
- else
- return igb_pci_enable_sriov(dev, num_vfs);
+ int err;
+
+ if (num_vfs == 0) {
+ return igb_disable_sriov(dev, true);
+ } else {
+ err = igb_enable_sriov(dev, num_vfs, true);
+ return err ? err : num_vfs;
+ }
#endif
return 0;
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 3a32809510fc..72cb1b56e9f2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;
igbvf_configure_msix(adapter);
return 0;
+free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index b8ba3f94c363..a47a2e3e548c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/etherdevice.h>
+
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 2928a6c73692..25fc6c65209b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6010,18 +6010,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- if (e->gate_mask & BIT(i))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (e->gate_mask & BIT(i)) {
queue_uses[i]++;
- /* There are limitations: A single queue cannot be
- * opened and closed multiple times per cycle unless the
- * gate stays open. Check for it.
- */
- if (queue_uses[i] > 1 &&
- !(prev->gate_mask & BIT(i)))
- return false;
- }
+ /* There are limitations: A single queue cannot
+ * be opened and closed multiple times per cycle
+ * unless the gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
+ return false;
+ }
}
return true;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0e39d199ff06..2cad76d0a50e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3549,6 +3549,8 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
netdev_tx_reset_queue(nq);
+ txq->buf = NULL;
+ txq->tso_hdrs = NULL;
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 41d935d1aaf6..40aeaa7bd739 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
@@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 75ba57bd1d46..9af22f497a40 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
if (!priv->prs_double_vlans)
return -ENOMEM;
- /* Double VLAN: 0x8100, 0x88A8 */
- err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+ /* Double VLAN: 0x88A8, 0x8100 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
if (err)
return err;
@@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- int tid;
-
- /* IPv4 over PPPoE with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, PPP_IP);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
- sizeof(struct iphdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
+ int tid, ihl;
- /* IPv4 over PPPoE without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
+ /* IPv4 over PPPoE with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
- pe.index = tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD |
- MVPP2_PRS_IPV4_IHL_MIN,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
+ mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
- /* Clear ri before updating */
- pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
/* IPv6 over PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7f8ffbf79cf7..ab126f8706c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -709,6 +709,7 @@ err_unreg_netdev:
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
@@ -762,6 +763,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3cb43623d3db..e14050e17862 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -753,6 +753,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
+ mac->speed = speed;
switch (speed) {
case SPEED_2500:
case SPEED_1000:
@@ -763,8 +764,6 @@ static void mtk_mac_link_up(struct phylink_config *config,
break;
}
- mtk_set_queue_speed(mac->hw, mac->id, speed);
-
/* Configure duplex */
if (duplex == DUPLEX_FULL)
mcr |= MAC_MCR_FORCE_DPX;
@@ -2059,9 +2058,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe[0], skb, hash);
-
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (trxd.rxd3 & RX_DMA_VTAG_V2) {
@@ -2089,6 +2085,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -3237,6 +3236,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
+ if (mac->speed > 0 && mac->speed <= s.base.speed)
+ s.base.speed = 0;
+
mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 6883eb34cd8b..fd07d6e14273 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -8,6 +8,7 @@
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
@@ -458,6 +459,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
hwe->ib1 &= ~MTK_FOE_IB1_STATE;
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
+ mtk_ppe_cache_clear(ppe);
}
entry->hash = 0xffff;
@@ -699,7 +701,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
goto out;
- tag += 4;
+ if (!skb_metadata_dst(skb))
+ tag += 4;
+
if (get_unaligned_be16(tag) != ETH_P_8021Q)
break;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 81afd5ee3fbf..161751bb36c9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -576,6 +576,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
+ flow_block_cb_incref(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &block_cb_list);
return 0;
@@ -584,7 +585,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
if (!block_cb)
return -ENOENT;
- if (flow_block_cb_decref(block_cb)) {
+ if (!flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 0869d4fff17b..4b5e459b6d49 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -674,7 +674,7 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
mlx4_en_get_cqe_ts(_ctx->cqe));
@@ -686,7 +686,7 @@ int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index bcd6370de440..c5dae48b7932 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -162,7 +162,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
@@ -174,7 +174,7 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
- return -EOPNOTSUPP;
+ return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 8af53178e40d..33b3620ea45c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1412,6 +1412,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
struct mlx5e_macsec_aso *aso;
struct mlx5_aso_wqe *aso_wqe;
struct mlx5_aso *maso;
+ unsigned long expires;
int err;
aso = &macsec->aso;
@@ -1425,7 +1426,13 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ err = mlx5_aso_poll_cq(maso, false);
+ if (err)
+ usleep_range(2, 10);
+ } while (err && time_is_after_jiffies(expires));
+
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 2449731b7d79..89de92d06483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < ets->ets_cap; i++) {
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
+ }
+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a7f2ab22cc40..7ca7e9b57607 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4150,8 +4150,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
- if (mlx5e_is_uplink_rep(priv))
+ if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
+ features |= NETIF_F_NETNS_LOCAL;
+ } else {
+ features &= ~NETIF_F_NETNS_LOCAL;
+ }
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6bfed633343a..87a2850b32d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1103,8 +1103,8 @@ static void
mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
struct mlx5_core_dev *mdev)
{
+ u32 link_speed = 0;
u64 link_speed64;
- u32 link_speed;
hairpin_params->mdev = mdev;
/* set hairpin pair per each 50Gbs share of the link */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index d55775627a47..50d2ea323979 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -364,8 +364,7 @@ int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vpo
if (WARN_ON_ONCE(IS_ERR(vport))) {
esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
- err = PTR_ERR(vport);
- goto out;
+ return PTR_ERR(vport);
}
esw_acl_ingress_ofld_rules_destroy(esw, vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 0f052513fefa..8bdf28762f41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -959,6 +959,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+ esw_apply_vport_rx_mode(esw, vport, false, false);
esw_vport_cleanup(esw, vport);
esw->enabled_vports--;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 22075943bb58..25a8076a77bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3405,6 +3405,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
+static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
+{
+ struct net *devl_net, *netdev_net;
+ struct mlx5_eswitch *esw;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
+ devl_net = devlink_net(devlink);
+
+ return net_eq(devl_net, netdev_net);
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3419,6 +3431,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ return -EPERM;
+ }
+
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
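The added check rejects switching to switchdev mode once the uplink netdev's network namespace has diverged from the devlink instance's. A minimal sketch of that comparison, assuming the caller already holds both pointers:

#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/devlink.h>

/* True when the uplink netdev still lives in the devlink instance's netns. */
static bool netns_still_matches(struct devlink *devlink,
				struct net_device *uplink)
{
	return net_eq(devlink_net(devlink), dev_net(uplink));
}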
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index c5240d38c9db..09ed6e5fa6c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -105,7 +105,6 @@ struct mlxsw_thermal {
struct thermal_zone_device *tzdev;
int polling_delay;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
- u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
@@ -468,7 +467,7 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
return idx;
/* Normalize the state to the valid speed range. */
- state = thermal->cooling_levels[state];
+ state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
@@ -859,10 +858,6 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
}
- /* Initialize cooling levels per PWM state. */
- for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
-
thermal->polling_delay = bus_info->low_frequency ?
MLXSW_THERMAL_SLOW_POLL_INT :
MLXSW_THERMAL_POLL_INT;
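The mlxsw change drops the per-state cooling_levels[] lookup table and clamps the requested state in place with max_t(). A minimal sketch of the same clamp, with min_state standing in for MLXSW_THERMAL_MIN_STATE:

#include <linux/minmax.h>

/* Never program a fan duty below the platform's minimum cooling state. */
static unsigned long normalize_cooling_state(unsigned long state,
					     unsigned long min_state)
{
	return max_t(unsigned long, min_state, state);
}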
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 045a24cacfa5..b6ee2d658b0c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -1354,7 +1354,7 @@ static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
/* In case there are no {Port, VID} => FID mappings on the port,
@@ -1391,7 +1391,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index bdb893476832..d0e6cd8dbe5c 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -258,6 +258,7 @@ struct ocelot_stat_layout {
struct ocelot_stats_region {
struct list_head node;
u32 base;
+ enum ocelot_stat first_stat;
int count;
u32 *buf;
};
@@ -273,6 +274,7 @@ static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS]
OCELOT_STAT(RX_ASSEMBLY_OK),
OCELOT_STAT(RX_MERGE_FRAGMENTS),
OCELOT_STAT(TX_MERGE_FRAGMENTS),
+ OCELOT_STAT(TX_MM_HOLD),
OCELOT_STAT(RX_PMAC_OCTETS),
OCELOT_STAT(RX_PMAC_UNICAST),
OCELOT_STAT(RX_PMAC_MULTICAST),
@@ -341,11 +343,12 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
*/
static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
{
- unsigned int idx = port * OCELOT_NUM_STATS;
struct ocelot_stats_region *region;
int j;
list_for_each_entry(region, &ocelot->stats_regions, node) {
+ unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat;
+
for (j = 0; j < region->count; j++) {
u64 *stat = &ocelot->stats[idx + j];
u64 val = region->buf[j];
@@ -355,8 +358,6 @@ static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
*stat = (*stat & ~(u64)U32_MAX) + val;
}
-
- idx += region->count;
}
}
@@ -899,7 +900,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
if (!layout[i].reg)
continue;
- if (region && layout[i].reg == last + 4) {
+ if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
+ ocelot->map[SYS][last & REG_MASK] + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -914,6 +916,7 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
WARN_ON(last >= layout[i].reg);
region->base = layout[i].reg;
+ region->first_stat = i;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
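With first_stat recorded per region, the transfer loop above indexes the per-port stats array from the region's own starting position instead of accumulating region counts as it walks the list. A minimal sketch of that addressing, using stand-in names (stats_region, transfer_region, num_stats) rather than the driver's:

#include <linux/types.h>
#include <linux/limits.h>

struct stats_region {
	unsigned int first_stat;	/* index of this region's first counter */
	unsigned int count;
	u32 *buf;			/* latest 32-bit hardware readings */
};

static void transfer_region(u64 *stats, unsigned int num_stats, int port,
			    const struct stats_region *r)
{
	unsigned int idx = port * num_stats + r->first_stat;
	unsigned int j;

	for (j = 0; j < r->count; j++) {
		u64 val = r->buf[j];

		/* Keep the carried-over upper 32 bits, refresh the low half. */
		stats[idx + j] = (stats[idx + j] & ~(u64)U32_MAX) + val;
	}
}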
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d17d1b4f2585..825356ee3492 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
*/
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!*new_addr) {
+ if (dma_mapping_error(lp->device, *new_addr)) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
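Both sonic.c hunks replace a NULL test on the returned dma_addr_t with dma_mapping_error(), which is the only reliable way to detect a failed streaming mapping (address zero can be a valid mapping). A minimal sketch of the pattern, with map_tx_buffer() as a hypothetical caller:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *addr)
{
	*addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;	/* do not test the address against zero */

	return 0;
}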
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 2bf18748581d..fa167b1aa019 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
}
vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
vport_id = vf->vport_id;
return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5152,7 +5155,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
- if (!vf->vport_instance)
+ if (!vf || !vf->vport_instance)
continue;
memset(&params, 0, sizeof(params));
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 3115b2c12898..eaa50050aa0b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -724,9 +724,15 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
unregister_netdev(netdev);
netif_napi_del(&adpt->rx_q.napi);
+ free_irq(adpt->irq.irq, &adpt->irq);
+ cancel_work_sync(&adpt->work_thread);
+
emac_clks_teardown(adpt);
put_device(&adpt->phydev->mdio.dev);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 930496cd34ed..b50f16786c24 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -826,6 +826,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
/* disable phy pfm mode */
phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+ /* disable 10m pll off */
+ phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0);
+
rtl8168g_disable_aldps(phydev);
rtl8168g_config_eee_phy(phydev);
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7022fb2005a2..d30459dbfe8f 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1304,7 +1304,8 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
static int efx_ef10_init_nic(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- netdev_features_t hw_enc_features = 0;
+ struct net_device *net_dev = efx->net_dev;
+ netdev_features_t tun_feats, tso_feats;
int rc;
if (nic_data->must_check_datapath_caps) {
@@ -1349,20 +1350,30 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- /* add encapsulated checksum offload features */
+ /* encap features might change during reset if fw variant changed */
if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
- hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- /* add encapsulated TSO features */
- if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
- netdev_features_t encap_tso_features;
+ net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ else
+ net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+ tun_feats = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+ tso_feats = NETIF_F_TSO | NETIF_F_TSO6;
- hw_enc_features |= encap_tso_features | NETIF_F_TSO;
- efx->net_dev->features |= encap_tso_features;
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+ /* If this is first nic_init, or if it is a reset and a new fw
+ * variant has added new features, enable them by default.
+ * If the features are not new, maintain their current value.
+ */
+ if (!(net_dev->hw_features & tun_feats))
+ net_dev->features |= tun_feats;
+ net_dev->hw_enc_features |= tun_feats | tso_feats;
+ net_dev->hw_features |= tun_feats;
+ } else {
+ net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
+ net_dev->hw_features &= ~tun_feats;
+ net_dev->features &= ~tun_feats;
}
- efx->net_dev->hw_enc_features = hw_enc_features;
/* don't fail init if RSS setup doesn't work */
rc = efx->type->rx_push_rss_config(efx, false,
@@ -4021,7 +4032,10 @@ static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_IPV6_CSUM | \
NETIF_F_RXHASH | \
- NETIF_F_NTUPLE)
+ NETIF_F_NTUPLE | \
+ NETIF_F_SG | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_RXALL)
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 02c2adeb0a12..884d8d168862 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1001,21 +1001,18 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
}
/* Determine netdevice features */
- net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
- net_dev->features |= NETIF_F_TSO6;
- if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
- net_dev->hw_enc_features |= NETIF_F_TSO6;
- }
- /* Check whether device supports TSO */
- if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
- net_dev->features &= ~NETIF_F_ALL_TSO;
+ net_dev->features |= efx->type->offload_features;
+
+ /* Add TSO features */
+ if (efx->type->tso_versions && efx->type->tso_versions(efx))
+ net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
+ /* Determine user configurable features */
net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
/* Disable receiving frames with bad FCS, by default. */
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a2e511912e6a..a690d139e177 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1037,8 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
- /* Indicate that the MAC is responsible for managing PHY PM */
- phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@@ -1066,6 +1064,7 @@ static int smsc911x_mii_init(struct platform_device *pdev,
struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
+ struct phy_device *phydev;
int err = -ENXIO;
pdata->mii_bus = mdiobus_alloc();
@@ -1108,6 +1107,10 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
+ phydev = phy_find_first(pdata->mii_bus);
+ if (phydev)
+ phydev->mac_managed_pm = true;
+
return 0;
err_out_free_bus_2:
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6b5d96bced47..54bb072aeb2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -418,6 +418,7 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int host_dma_width;
unsigned int rssen;
unsigned int vlhash;
unsigned int sphen;
@@ -531,7 +532,6 @@ struct mac_device_info {
unsigned int xlgmac;
unsigned int num_vlan;
u32 vlan_filter[32];
- unsigned int promisc;
bool vlan_fail_q_en;
u8 vlan_fail_q;
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index ac550d1ac015..2a2be65d65a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -288,7 +288,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
- plat_dat->addr64 = dwmac->ops->addr_width;
+ plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 7deb1f817dac..ab9f876b6df7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
priv->plat->mdio_bus_data->xpcs_an_inband = false;
} else {
priv->plat->max_speed = 1000;
- priv->plat->mdio_bus_data->xpcs_an_inband = true;
}
}
@@ -684,7 +683,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
intel_priv->is_pse = true;
plat->bus_id = 2;
- plat->addr64 = 32;
+ plat->host_dma_width = 32;
plat->clk_ptp_rate = 200000000;
@@ -725,7 +724,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
intel_priv->is_pse = true;
plat->bus_id = 3;
- plat->addr64 = 32;
+ plat->host_dma_width = 32;
plat->clk_ptp_rate = 200000000;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 2f7d8e4561d9..9ae31e3dc821 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -591,7 +591,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
plat->riwt_off = 1;
plat->maxmtu = ETH_DATA_LEN;
- plat->addr64 = priv_plat->variant->dma_bit_mask;
+ plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
plat->init = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 8c7a0b7c9952..36251ec2589c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -472,12 +472,6 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
if (vid > 4095)
return -EINVAL;
- if (hw->promisc) {
- netdev_err(dev,
- "Adding VLAN in promisc mode not supported\n");
- return -EPERM;
- }
-
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
/* For single VLAN filter, VID 0 means VLAN promiscuous */
@@ -527,12 +521,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
{
int i, ret = 0;
- if (hw->promisc) {
- netdev_err(dev,
- "Deleting VLAN in promisc mode not supported\n");
- return -EPERM;
- }
-
/* Single Rx VLAN Filter */
if (hw->num_vlan == 1) {
if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
@@ -557,39 +545,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
return ret;
}
-static void dwmac4_vlan_promisc_enable(struct net_device *dev,
- struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- u32 hash;
- u32 val;
- int i;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- dwmac4_write_single_vlan(dev, 0);
- return;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
- val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
- dwmac4_write_vlan_filter(dev, hw, i, val);
- }
- }
-
- hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
- if (hash & GMAC_VLAN_VLHT) {
- value = readl(ioaddr + GMAC_VLAN_TAG);
- if (value & GMAC_VLAN_VTHM) {
- value &= ~GMAC_VLAN_VTHM;
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
- }
-}
-
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
struct mac_device_info *hw)
{
@@ -709,22 +664,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
/* VLAN filtering */
- if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
+ value &= ~GMAC_PACKET_FILTER_VTFE;
+ else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
value |= GMAC_PACKET_FILTER_VTFE;
writel(value, ioaddr + GMAC_PACKET_FILTER);
-
- if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
- if (!hw->promisc) {
- hw->promisc = 1;
- dwmac4_vlan_promisc_enable(dev, hw);
- }
- } else {
- if (hw->promisc) {
- hw->promisc = 0;
- dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
- }
- }
}
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8f543c3ab5c5..d7fcab057032 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1134,20 +1134,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
int ret;
+ if (!phylink_expects_phy(priv->phylink))
+ return 0;
+
fwnode = of_fwnode_handle(priv->plat->phylink_node);
if (!fwnode)
fwnode = dev_fwnode(priv->device);
if (fwnode)
- ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ else
+ phy_fwnode = NULL;
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
- if (!fwnode || ret) {
+ if (!phy_fwnode || IS_ERR(phy_fwnode)) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1163,6 +1169,9 @@ static int stmmac_init_phy(struct net_device *dev)
}
ret = phylink_connect_phy(priv->phylink, phydev);
+ } else {
+ fwnode_handle_put(phy_fwnode);
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
if (!priv->plat->pmt) {
@@ -1431,7 +1440,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
- if (priv->dma_cap.addr64 <= 32)
+ if (priv->dma_cap.host_dma_width <= 32)
gfp |= GFP_DMA32;
if (!buf->page) {
@@ -4587,7 +4596,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
unsigned int entry = rx_q->dirty_rx;
gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
- if (priv->dma_cap.addr64 <= 32)
+ if (priv->dma_cap.host_dma_width <= 32)
gfp |= GFP_DMA32;
while (dirty-- > 0) {
@@ -6205,7 +6214,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
seq_printf(seq, "\tFlexible RX Parser: %s\n",
priv->dma_cap.frpsel ? "Y" : "N");
seq_printf(seq, "\tEnhanced Addressing: %d\n",
- priv->dma_cap.addr64);
+ priv->dma_cap.host_dma_width);
seq_printf(seq, "\tReceive Side Scaling: %s\n",
priv->dma_cap.rssen ? "Y" : "N");
seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
@@ -6622,6 +6631,8 @@ int stmmac_xdp_open(struct net_device *dev)
goto init_error;
}
+ stmmac_reset_queues_param(priv);
+
/* DMA CSR Channel configuration */
for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
@@ -6948,7 +6959,7 @@ static void stmmac_napi_del(struct net_device *dev)
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
+ int ret = 0, i;
if (netif_running(dev))
stmmac_release(dev);
@@ -6957,6 +6968,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->plat->rx_queues_to_use = rx_cnt;
priv->plat->tx_queues_to_use = tx_cnt;
+ if (!netif_is_rxfh_configured(dev))
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i,
+ rx_cnt);
stmmac_napi_add(dev);
@@ -7178,20 +7193,22 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "SPH feature enabled\n");
}
- /* The current IP register MAC_HW_Feature1[ADDR64] only define
- * 32/40/64 bit width, but some SOC support others like i.MX8MP
- * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
- * So overwrite dma_cap.addr64 according to HW real design.
+ /* Ideally our host DMA address width is the same as for the
+ * device. However, it may differ and then we have to use our
+ * host DMA width for allocation and the device DMA width for
+ * register handling.
*/
- if (priv->plat->addr64)
- priv->dma_cap.addr64 = priv->plat->addr64;
+ if (priv->plat->host_dma_width)
+ priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
+ else
+ priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
- if (priv->dma_cap.addr64) {
+ if (priv->dma_cap.host_dma_width) {
ret = dma_set_mask_and_coherent(device,
- DMA_BIT_MASK(priv->dma_cap.addr64));
+ DMA_BIT_MASK(priv->dma_cap.host_dma_width));
if (!ret) {
- dev_info(priv->device, "Using %d bits DMA width\n",
- priv->dma_cap.addr64);
+ dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
+ priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
/*
* If more than 32 bits can be addressed, make sure to
@@ -7206,7 +7223,7 @@ int stmmac_dvr_probe(struct device *device,
goto error_hw_init;
}
- priv->dma_cap.addr64 = 32;
+ priv->dma_cap.host_dma_width = 32;
}
}
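The stmmac rework keeps addr64 as the device-side width and feeds the separate host_dma_width into dma_set_mask_and_coherent(). A minimal sketch of constraining the mask from a width in bits and falling back to 32 bits when the wider mask is rejected, with set_dma_width() as a hypothetical helper:

#include <linux/dma-mapping.h>

static int set_dma_width(struct device *dev, unsigned int width)
{
	if (width && !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(width)))
		return 0;

	/* Fall back to a 32-bit mask if the wider one is not accepted. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}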
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 4e3861c47708..bcea87b7151c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2926,7 +2926,8 @@ err_free_phylink:
am65_cpsw_nuss_phylink_cleanup(common);
am65_cpts_release(common->cpts);
err_of_clear:
- of_platform_device_destroy(common->mdio_dev, NULL);
+ if (common->mdio_dev)
+ of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -2956,7 +2957,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
am65_cpts_release(common->cpts);
am65_cpsw_disable_serdes_phy(common);
- of_platform_device_destroy(common->mdio_dev, NULL);
+ if (common->mdio_dev)
+ of_platform_device_destroy(common->mdio_dev, NULL);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 16ee9c29cb35..8caf85acbb6a 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -636,6 +636,10 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
val = lower_32_bits(cycles);
am65_cpts_write32(cpts, val, genf[req->index].length);
+ am65_cpts_write32(cpts, 0, genf[req->index].control);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi);
+ am65_cpts_write32(cpts, 0, genf[req->index].ppm_low);
+
cpts->genf_enable |= BIT(req->index);
} else {
am65_cpts_write32(cpts, 0, genf[req->index].length);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index cf8de8a7a8a1..9d535ae59626 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
+ dma_addr_t cpu_addr;
+
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- descr->bus_addr =
- dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
- if (!descr->bus_addr)
+ cpu_addr = dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ctodev(card), cpu_addr))
goto iommu_error;
+ descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -365,26 +367,28 @@ iommu_error:
*
* allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
* Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
struct gelic_descr *descr)
{
+ static const unsigned int rx_skb_size =
+ ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+ GELIC_NET_RXBUF_ALIGN - 1;
+ dma_addr_t cpu_addr;
int offset;
- unsigned int bufsize;
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
- descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(bufsize);
+ descr->buf_size = cpu_to_be32(rx_skb_size);
descr->dmac_cmd_status = 0;
descr->result_size = 0;
descr->valid_size = 0;
@@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (offset)
skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
- descr->skb->data,
- GELIC_NET_MAX_MTU,
- DMA_FROM_DEVICE));
- if (!descr->buf_addr) {
+ cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+ GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+ descr->buf_addr = cpu_to_be32(cpu_addr);
+ if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
dev_info(ctodev(card),
@@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
- if (!buf) {
+ if (dma_mapping_error(ctodev(card), buf)) {
dev_err(ctodev(card),
"dma map 2 failed (%p, %i). Dropping packet\n",
skb->data, skb->len);
@@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
data_error = be32_to_cpu(descr->data_error);
/* unmap skb buffer */
dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_MTU,
+ GELIC_NET_MAX_FRAME,
DMA_FROM_DEVICE);
skb_put(skb, be32_to_cpu(descr->valid_size)?
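The gelic changes size the RX skb as the aligned maximum frame plus slack, then use skb_reserve() to push the data pointer up to the 128-byte boundary the DMAC requires. A minimal sketch of that allocation and alignment, with RXBUF_ALIGN standing in for GELIC_NET_RXBUF_ALIGN and alloc_aligned_rx_skb() as a hypothetical helper:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define RXBUF_ALIGN	128	/* stand-in for GELIC_NET_RXBUF_ALIGN */

/* Allocate an RX skb with enough slack to align skb->data to RXBUF_ALIGN. */
static struct sk_buff *alloc_aligned_rx_skb(struct net_device *netdev,
					    unsigned int frame_len)
{
	struct sk_buff *skb;
	unsigned long offset;

	skb = netdev_alloc_skb(netdev, ALIGN(frame_len, RXBUF_ALIGN) +
			       RXBUF_ALIGN - 1);
	if (!skb)
		return NULL;

	offset = (unsigned long)skb->data & (RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(skb, RXBUF_ALIGN - offset);

	return skb;
}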
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 68f324ed4eaf..0d98defb011e 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -19,8 +19,9 @@
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */
-#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME 2312
+#define GELIC_NET_MAX_MTU 2294
+#define GELIC_NET_MIN_MTU 64
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 77d8d7f1707e..97e2c1e13b80 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -222,7 +222,7 @@
#define WX_PX_INTA 0x110
#define WX_PX_GPIE 0x118
#define WX_PX_GPIE_MODEL BIT(0)
-#define WX_PX_IC 0x120
+#define WX_PX_IC(_i) (0x120 + (_i) * 4)
#define WX_PX_IMS(_i) (0x140 + (_i) * 4)
#define WX_PX_IMC(_i) (0x150 + (_i) * 4)
#define WX_PX_ISB_ADDR_L 0x160
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 5b564d348c09..17412e5282de 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -352,7 +352,7 @@ static void ngbe_up(struct wx *wx)
netif_tx_start_all_queues(wx->netdev);
/* clear any pending interrupts, may auto mask */
- rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_IC(0));
rd32(wx, WX_PX_MISC_IC);
ngbe_irq_enable(wx, true);
if (wx->gpio_ctrl)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 6c0a98230557..a58ce5463686 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -229,7 +229,8 @@ static void txgbe_up_complete(struct wx *wx)
wx_napi_enable_all(wx);
/* clear any pending interrupts, may auto mask */
- rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_IC(0));
+ rd32(wx, WX_PX_IC(1));
rd32(wx, WX_PX_MISC_IC);
txgbe_irq_enable(wx, true);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 894e92ef415b..9f505cf02d96 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -503,6 +503,11 @@ static void
xirc2ps_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ struct local_info *local = netdev_priv(dev);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ cancel_work_sync(&local->tx_timeout_task);
dev_dbg(&link->dev, "detach\n");