Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_phy.c | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_phy.h | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h | 10
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 144
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 22
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 33
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 1
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 42
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 84
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 38
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 54
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 14
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 10
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 46
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c | 29
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 53
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 4
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 26
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 4
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 3
71 files changed, 775 insertions, 441 deletions
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index ed5b465bc664..992fedbe4ce3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -64,6 +64,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
+ u32 quirks;
u32 priv_data_len;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 4435c6374f7e..7c7bf6bf163f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self)
self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
+
+ /* Disable the PTP on NICs where it's known to cause datapath
+ * problems.
+ * Ideally this should have been done by PHY provisioning, but
+ * many units have been shipped with enabled PTP block already.
+ */
+ if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
+ if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
+ aq_phy_disable_ptp(self->aq_hw);
}
for (i = 0U; i < self->aq_vecs; i++) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 2ab003065e62..439ce9692dac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -81,6 +81,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
+#define AQ_NIC_QUIRK_BAD_PTP BIT(0)
+
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
index 51ae921e3e1f..949ac2351701 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
@@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* aQuantia Corporation Network Driver
- * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2018-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
#include "aq_phy.h"
+#define HW_ATL_PTP_DISABLE_MSK BIT(10)
+
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
int err = 0;
@@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw)
return true;
}
+
+void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
+{
+ static const u16 ptp_registers[] = {
+ 0x031e,
+ 0x031d,
+ 0x031c,
+ 0x031b,
+ };
+ u16 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
+ ptp_registers[i]);
+
+ aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
+ ptp_registers[i],
+ val & ~HW_ATL_PTP_DISABLE_MSK);
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
index 84b72ad04a4a..86cc1ee836e2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* aQuantia Corporation Network Driver
- * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2018-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
#ifndef AQ_PHY_H
@@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
bool aq_phy_init(struct aq_hw_s *aq_hw);
+void aq_phy_disable_ptp(struct aq_hw_s *aq_hw);
+
#endif /* AQ_PHY_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 14d79f70cad7..2125bc20ab6a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
AQ_NIC_RATE_100M,
};
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+ .quirks = AQ_NIC_QUIRK_BAD_PTP,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+ .quirks = AQ_NIC_QUIRK_BAD_PTP,
+};
+
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
@@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
/* WSP, if min_rate is set for at least one TC.
* RR otherwise.
+ *
+ * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
+ * overwrite it here in that case.
*/
- hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ if (!nic_cfg->is_ptp)
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+
/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR.
*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 30f468f2084d..16091af17980 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
-
-#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
-#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112;
#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
-
-#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
-#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112
extern const struct aq_hw_ops hw_atl_ops_b0;
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 112edbd30823..38cce66ef212 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
if (IS_ERR(ag->mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- return PTR_ERR(ag->mdio_reset);
+ err = PTR_ERR(ag->mdio_reset);
+ goto mdio_err_put_clk;
}
mii_bus->name = "ag71xx_mdio";
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6a884df44612..7463a1847ceb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
- u32 ring_size, rx_size, rx_space;
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
u32 agg_factor = 0, agg_ring_size = 0;
/* 8 for CRC and VLAN */
@@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
- ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ max_rx_cmpl = bp->rx_ring_size;
+ /* MAX TPA needs to be added because TPA_START completions are
+ * immediately recycled, so the TPA completions are not bound by
+ * the RX ring size.
+ */
+ if (bp->flags & BNXT_FLAG_TPA)
+ max_rx_cmpl += bp->max_tpa;
+ /* RX and TPA completions are 32-byte, all others are 16-byte */
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
bp->cp_ring_size = ring_size;
bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
@@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
- if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
- &bp->sp_event))
- bnxt_init_ethtool_link_settings(bp);
-
rc = bnxt_update_link(bp, true);
- mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
+
+ if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
+ &bp->sp_event))
+ bnxt_init_ethtool_link_settings(bp);
+ mutex_unlock(&bp->link_lock);
}
if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6b88143af5ea..b4aa56dc4f9f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- if (netif_running(dev))
+ if (netif_running(dev)) {
+ mutex_lock(&bp->link_lock);
rc = bnxt_hwrm_set_pause(bp);
+ mutex_unlock(&bp->link_lock);
+ }
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index af924a8b885f..e471b14fc6e9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -543,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
#define VALIDATE_MASK(x) \
bcmgenet_hfb_validate_mask(&(x), sizeof(x))
-static int bcmgenet_hfb_insert_data(u32 *f, int offset,
- void *val, void *mask, size_t size)
+static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
+ u32 offset, void *val, void *mask,
+ size_t size)
{
- int index;
- u32 tmp;
+ u32 index, tmp;
- index = offset / 2;
- tmp = f[index];
+ index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
+ tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
while (size--) {
if (offset++ & 1) {
@@ -567,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
tmp |= 0x10000;
break;
}
- f[index++] = tmp;
+ bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
if (size)
- tmp = f[index];
+ tmp = bcmgenet_hfb_readl(priv,
+ index * sizeof(u32));
} else {
tmp &= ~0xCFF00;
tmp |= (*(unsigned char *)val++) << 8;
@@ -585,44 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
break;
}
if (!size)
- f[index] = tmp;
+ bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
}
}
return 0;
}
-static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue, int f_index)
-{
- u32 base = f_index * priv->hw_params->hfb_filter_size;
- int i;
-
- for (i = 0; i < f_length; i++)
- bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));
-
- bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
-}
-
-static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
- struct bcmgenet_rxnfc_rule *rule)
+static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+ struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- int err = 0, offset = 0, f_length = 0;
+ u32 offset = 0, f_length = 0, f;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- u32 *f_data;
-
- f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
- GFP_KERNEL);
- if (!f_data)
- return -ENOMEM;
+ f = fs->location;
if (fs->flow_type & FLOW_MAC_EXT) {
- bcmgenet_hfb_insert_data(f_data, 0,
+ bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
sizeof(fs->h_ext.h_dest));
}
@@ -630,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
if (fs->flow_type & FLOW_EXT) {
if (fs->m_ext.vlan_etype ||
fs->m_ext.vlan_tci) {
- bcmgenet_hfb_insert_data(f_data, 12,
+ bcmgenet_hfb_insert_data(priv, f, 12,
&fs->h_ext.vlan_etype,
&fs->m_ext.vlan_etype,
sizeof(fs->h_ext.vlan_etype));
- bcmgenet_hfb_insert_data(f_data, 14,
+ bcmgenet_hfb_insert_data(priv, f, 14,
&fs->h_ext.vlan_tci,
&fs->m_ext.vlan_tci,
sizeof(fs->h_ext.vlan_tci));
@@ -646,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
f_length += DIV_ROUND_UP(ETH_HLEN, 2);
- bcmgenet_hfb_insert_data(f_data, 0,
+ bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_u.ether_spec.h_dest,
&fs->m_u.ether_spec.h_dest,
sizeof(fs->h_u.ether_spec.h_dest));
- bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
+ bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
&fs->h_u.ether_spec.h_source,
&fs->m_u.ether_spec.h_source,
sizeof(fs->h_u.ether_spec.h_source));
- bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
&fs->h_u.ether_spec.h_proto,
&fs->m_u.ether_spec.h_proto,
sizeof(fs->h_u.ether_spec.h_proto));
@@ -664,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
/* Specify IP Ether Type */
val_16 = htons(ETH_P_IP);
mask_16 = 0xFFFF;
- bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
&val_16, &mask_16, sizeof(val_16));
- bcmgenet_hfb_insert_data(f_data, 15 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 15 + offset,
&fs->h_u.usr_ip4_spec.tos,
&fs->m_u.usr_ip4_spec.tos,
sizeof(fs->h_u.usr_ip4_spec.tos));
- bcmgenet_hfb_insert_data(f_data, 23 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 23 + offset,
&fs->h_u.usr_ip4_spec.proto,
&fs->m_u.usr_ip4_spec.proto,
sizeof(fs->h_u.usr_ip4_spec.proto));
- bcmgenet_hfb_insert_data(f_data, 26 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 26 + offset,
&fs->h_u.usr_ip4_spec.ip4src,
&fs->m_u.usr_ip4_spec.ip4src,
sizeof(fs->h_u.usr_ip4_spec.ip4src));
- bcmgenet_hfb_insert_data(f_data, 30 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 30 + offset,
&fs->h_u.usr_ip4_spec.ip4dst,
&fs->m_u.usr_ip4_spec.ip4dst,
sizeof(fs->h_u.usr_ip4_spec.ip4dst));
@@ -688,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
/* Only supports 20 byte IPv4 header */
val_8 = 0x45;
mask_8 = 0xFF;
- bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
+ bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
&val_8, &mask_8,
sizeof(val_8));
size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
- bcmgenet_hfb_insert_data(f_data,
+ bcmgenet_hfb_insert_data(priv, f,
ETH_HLEN + 20 + offset,
&fs->h_u.usr_ip4_spec.l4_4_bytes,
&fs->m_u.usr_ip4_spec.l4_4_bytes,
@@ -701,34 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
break;
}
+ bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
/* Ring 0 flows can be handled by the default Descriptor Ring
* We'll map them to ring 0, but don't enable the filter
*/
- bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
- fs->location);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
rule->state = BCMGENET_RXNFC_STATE_DISABLED;
} else {
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter(priv, f_data, f_length,
- fs->ring_cookie, fs->location);
- bcmgenet_hfb_enable_filter(priv, fs->location);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
+ fs->ring_cookie);
+ bcmgenet_hfb_enable_filter(priv, f);
rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
-
- kfree(f_data);
-
- return err;
}
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
*/
+static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
+{
+ u32 base, i;
+
+ base = f_index * priv->hw_params->hfb_filter_size;
+ for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
+ bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
+}
+
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
@@ -740,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
bcmgenet_hfb_reg_writel(priv, 0x0,
HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
- for (i = 0; i < priv->hw_params->hfb_filter_cnt *
- priv->hw_params->hfb_filter_size; i++)
- bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+ for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
+ bcmgenet_hfb_clear_filter(priv, i);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
int i;
+ INIT_LIST_HEAD(&priv->rxnfc_list);
if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
return;
- INIT_LIST_HEAD(&priv->rxnfc_list);
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -1437,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
- if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ }
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
- err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
- if (err) {
- netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
- err);
- return err;
- }
+ bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
list_add_tail(&loc_rule->list, &priv->rxnfc_list);
@@ -1473,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
- if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ }
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -3999,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- goto err;
+ goto err_clk_disable;
/* Mii wait queue */
init_waitqueue_head(&priv->wq);
@@ -4011,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
err = PTR_ERR(priv->clk_wol);
- goto err;
+ goto err_clk_disable;
}
priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
err = PTR_ERR(priv->clk_eee);
- goto err;
+ goto err_clk_disable;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
@@ -4129,8 +4118,9 @@ static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
- u32 offset, reg;
+ u32 reg;
int ret;
if (!netif_running(dev))
@@ -4161,10 +4151,11 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- offset = HFB_FLT_ENABLE_V3PLUS;
- bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
- bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
- bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
+ /* Restore hardware filters */
+ bcmgenet_hfb_clear(priv);
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ bcmgenet_hfb_create_rxnfc_filter(priv, rule);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -4208,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 offset;
if (!netif_running(dev))
return 0;
@@ -4220,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
- /* Preserve filter state and disable filtering */
- priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- offset = HFB_FLT_ENABLE_V3PLUS;
- priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
- priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ /* Disable filtering */
bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
return 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index a12cb59298f4..f6ca01da141d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -696,7 +696,6 @@ struct bcmgenet_priv {
u32 wolopts;
u8 sopass[SOPASS_MAX];
bool wol_active;
- u32 hfb_en[3];
struct bcmgenet_mib_counters mib;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 4ea6a26b04f7..1c86eddb1b51 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
+ priv->crc_fwd_en = 0;
/* Disable Magic Packet Detection */
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~(MPD_EN | MPD_PW_EN);
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already reset so skip the rest */
+ reg &= ~(MPD_EN | MPD_PW_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ }
/* Disable WAKE_FILTER Detection */
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return; /* already reset so skip the rest */
+ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- priv->crc_fwd_en = 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 52582e8ed90e..2213e6ab8151 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2821,11 +2821,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= WAKE_MAGIC;
+
+ if (bp->wol & MACB_WOL_ENABLED)
+ wol->wolopts |= WAKE_MAGIC;
+ }
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2833,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct macb *bp = netdev_priv(netdev);
int ret;
+ /* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- if (!ret)
- return 0;
+ /* Don't manage WoL on MAC if handled by the PHY
+ * or if there's a failure in talking to the PHY
+ */
+ if (!ret || ret != -EOPNOTSUPP)
+ return ret;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
@@ -3730,7 +3736,7 @@ static int macb_init(struct platform_device *pdev)
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
- if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(bp->phy_interface))
val = GEM_BIT(RGMII);
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
@@ -4422,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev)
bp->wol = 0;
if (of_get_property(np, "magic-packet", NULL))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
spin_lock_init(&bp->lock);
@@ -4598,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev)
bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
}
- netif_carrier_off(netdev);
if (bp->ptp_info)
bp->ptp_info->ptp_remove(netdev);
- pm_runtime_force_suspend(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_suspend(dev);
return 0;
}
@@ -4616,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev)
if (!netif_running(netdev))
return 0;
- pm_runtime_force_resume(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IDR, MACB_BIT(WOL));
@@ -4654,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
@@ -4670,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 32a45dc51ed7..92eee66cbc84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
txq_info = adap->sge.uld_txq_info[tx_uld_type];
if (unlikely(!txq_info)) {
WARN_ON(true);
+ kfree_skb(skb);
return NET_XMIT_DROP;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8d13ea370db1..66e67b24a887 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
+ clk_disable_unprepare(port->pclk);
return PTR_ERR(port->reset);
}
reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(port->pclk);
return ret;
+ }
ret = register_netdev(netdev);
if (!ret) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 2972244e6eb0..43570f4911ea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
DMA_BIT_MASK(40));
if (err) {
netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ goto free_netdev;
}
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index f150cd454fa4..0998ceb1a26e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 4fac57dbb3c8..7a9675bd36e8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
return 0;
err_reg_netdev:
+ enetc_mdio_remove(pf);
enetc_of_put_phy(priv);
enetc_free_msix(priv);
err_alloc_msix:
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index d8d76da51c5e..832a2175636d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -590,6 +590,7 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
+void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3982285ed020..cc7fbfc09354 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1294,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
ndev->stats.tx_bytes += skb->len;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
- fep->bufdesc_ex) {
+ /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+ * are to time stamp the packet, so we still need to check time
+ * stamping enabled flag.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) &&
+ fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -2723,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return -ENODEV;
if (fep->bufdesc_ex) {
- if (cmd == SIOCSHWTSTAMP)
- return fec_ptp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return fec_ptp_get(ndev, rq);
+ bool use_fec_hwts = !phy_has_hwtstamp(phydev);
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (use_fec_hwts)
+ return fec_ptp_set(ndev, rq);
+ fec_ptp_disable_hwts(ndev);
+ } else if (cmd == SIOCGHWTSTAMP) {
+ if (use_fec_hwts)
+ return fec_ptp_get(ndev, rq);
+ }
}
return phy_mii_ioctl(phydev, rq, cmd);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 945643c02615..f8a592c96beb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+/**
+ * fec_ptp_disable_hwts - disable hardware time stamping
+ * @ndev: pointer to net_device
+ */
+void fec_ptp_disable_hwts(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fep->hwts_tx_en = 0;
+ fep->hwts_rx_en = 0;
+}
+
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b3c69e9038ea..b513b8c5c3b5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
+ if (!IS_ERR(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
+ } else {
+ eth_hw_addr_random(dev);
+ dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
+ }
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d041cac9a487..088550db2de7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -77,6 +77,7 @@
((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
enum hns_desc_type {
+ DESC_TYPE_UNKNOWN,
DESC_TYPE_SKB,
DESC_TYPE_FRAGLIST_SKB,
DESC_TYPE_PAGE,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index c38f3bbe7d97..71ed4c54f6d5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int k, sizeoflast;
dma_addr_t dma;
- if (type == DESC_TYPE_SKB) {
- struct sk_buff *skb = (struct sk_buff *)priv;
- int ret;
-
- ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret < 0))
- return ret;
-
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+ if (type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1118,12 +1110,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}
+ desc_cb->priv = priv;
desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
- desc_cb->priv = priv;
- desc_cb->dma = dma;
- desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16(size);
desc->tx.bdtp_fe_sc_vld_ra_ri =
@@ -1135,18 +1127,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
}
frag_buf_num = hns3_tx_bd_count(size);
- sizeoflast = size & HNS3_TX_LAST_SIZE_M;
+ sizeoflast = size % HNS3_MAX_BD_SIZE;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
- desc_cb->priv = priv;
- desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
- desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
- type == DESC_TYPE_SKB) && !k) ?
- type : DESC_TYPE_PAGE;
-
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
@@ -1158,7 +1143,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
- desc_cb = &ring->desc_cb[ring->next_to_use];
desc = &ring->desc[ring->next_to_use];
}
@@ -1346,6 +1330,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
unsigned int i;
for (i = 0; i < ring->desc_num; i++) {
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+
+ memset(desc, 0, sizeof(*desc));
+
/* check if this is where we started */
if (ring->next_to_use == next_to_use_orig)
break;
@@ -1353,6 +1341,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
+ if (!ring->desc_cb[ring->next_to_use].dma)
+ continue;
+
/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
ring->desc_cb[ring->next_to_use].type ==
@@ -1369,6 +1360,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
ring->desc_cb[ring->next_to_use].length = 0;
ring->desc_cb[ring->next_to_use].dma = 0;
+ ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
}
}
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
return;
if (linkup) {
- netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 66cd4395f781..a8776620acbc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -165,8 +165,6 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
-#define HNS3_TX_LAST_SIZE_M 0xffff
-
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d6bfdc6520df..36575e72a915 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2673,11 +2673,10 @@ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
delay_time);
}
-static int hclge_get_mac_link_status(struct hclge_dev *hdev)
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
struct hclge_link_status_cmd *req;
struct hclge_desc desc;
- int link_status;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
@@ -2689,33 +2688,25 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}
req = (struct hclge_link_status_cmd *)desc.data;
- link_status = req->status & HCLGE_LINK_STATUS_UP_M;
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
- return !!link_status;
+ return 0;
}
-static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
- unsigned int mac_state;
- int link_stat;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ *link_status = HCLGE_LINK_STATUS_DOWN;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
return 0;
- mac_state = hclge_get_mac_link_status(hdev);
-
- if (hdev->hw.mac.phydev) {
- if (hdev->hw.mac.phydev->state == PHY_RUNNING)
- link_stat = mac_state &
- hdev->hw.mac.phydev->link;
- else
- link_stat = 0;
-
- } else {
- link_stat = mac_state;
- }
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
+ return 0;
- return !!link_stat;
+ return hclge_get_mac_link_status(hdev, link_status);
}
static void hclge_update_link_status(struct hclge_dev *hdev)
@@ -2725,6 +2716,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
struct hnae3_handle *rhandle;
struct hnae3_handle *handle;
int state;
+ int ret;
int i;
if (!client)
@@ -2733,7 +2725,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
return;
- state = hclge_get_mac_phy_link(hdev);
+ ret = hclge_get_mac_phy_link(hdev, &state);
+ if (ret) {
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+ return;
+ }
+
if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
@@ -5809,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
/* to avoid rule conflict, when user configure rule by ethtool,
* we need to clear all arfs rules
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
- spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_config_rule(hdev, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5854,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
return ret;
}
+/* make sure being called after lock up with fd_rule_lock */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list)
{
@@ -5866,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;
- spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5883,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
-
- spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6266,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
u16 tmp_queue_id;
@@ -6276,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
- memset(&new_tuples, 0, sizeof(new_tuples));
- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
- spin_lock_bh(&hdev->fd_rule_lock);
-
/* when there is already fd rule existed add by user,
* arfs should not work
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
/* check is there flow director filter existed for this flow,
* if not, create a new filter for it;
* if filter exist with different queue id, modify the filter;
@@ -6371,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
#endif
}
+/* make sure being called after lock up with fd_rule_lock */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6423,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
hdev->fd_en = enable;
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
- if (!enable)
+
+ if (!enable) {
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_del_all_fd_entries(handle, clear);
- else
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ } else {
hclge_restore_fd_entries(handle);
+ }
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6524,14 +6522,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM 100
+ int link_status;
int i = 0;
int ret;
do {
- ret = hclge_get_mac_link_status(hdev);
- if (ret < 0)
+ ret = hclge_get_mac_link_status(hdev, &link_status);
+ if (ret)
return ret;
- else if (ret == link_ret)
+ if (link_status == link_ret)
return 0;
msleep(HCLGE_LINK_STATUS_MS);
@@ -6542,9 +6541,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
-#define HCLGE_LINK_STATUS_DOWN 0
-#define HCLGE_LINK_STATUS_UP 1
-
int link_ret;
link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
@@ -6891,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
+ spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.
@@ -9045,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 46e6e0fef3ba..9bbdd4557c27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -317,6 +317,9 @@ enum hclge_link_fail_code {
HCLGE_LF_XSFP_ABSENT,
};
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a10b022d1951..9162856de1b1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
+ int ret;
rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- rtnl_unlock();
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "is resetting when updating port based vlan info\n");
+ rtnl_unlock();
+ return;
+ }
+
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return;
+ }
/* send msg to PF and wait update port based vlan info */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
memcpy(send_msg.data, port_base_vlan_info, data_size);
- hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
- if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
- else
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (!ret) {
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = state;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
- rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fd7eae25fe9..5afb3c9c52d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f999cca37a8a..489bb5b59475 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
- if (ret_val) {
+ if (ret_val)
e_warn("Failed to disable ULP\n");
- goto out;
- }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8bb3db2cbd41..6e5861bfb0fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 64786568af0d..75a8c407e815 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
if (!netif_running(pf->netdev))
return;
+ rtnl_lock();
otx2_stop(pf->netdev);
pf->reset_count++;
otx2_open(pf->netdev);
netif_trans_update(pf->netdev);
+ rtnl_unlock();
}
static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index f4227517dc8e..92a3db69a6cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ cancel_work_sync(&vf->reset_task);
+ unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f6a1f8666f95..a1c45b39a230 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
return 0;
}
-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface, int speed)
{
u32 val;
int ret;
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ val = 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
val = (speed == SPEED_1000) ?
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
- if (state->interface !=
- PHY_INTERFACE_MODE_TRGMII)
- mtk_gmac0_rgmii_adjust(mac->hw,
- state->speed);
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->interface,
+ state->speed);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3d9aa7da95e9..2d3e45780719 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4356,12 +4356,14 @@ end:
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index bdb71332cbf2..3e44e4d820c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
return false;
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
/* Egress acl forward to vport is supported only for non-uplink representors */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index eefeb1cdc2ee..245a99f69641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
}
}
- tun_dst = tun_rx_dst(enc_opts.key.len);
+ if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else {
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
if (!tun_dst) {
- WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
return false;
}
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- 0, /* label */
- key.enc_tp.src, key.enc_tp.dst,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- TUNNEL_KEY);
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 951ea26d96bc..e472ed0eacfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index 58b13192df23..2805416c32a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 37b176801bcc..038a0f1cecec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 081f15074cac..3b892ec301b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
+ enum mlx5_port_status state)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int vport_admin_state;
+
+ mlx5_set_port_admin_status(mdev, state);
+
+ if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ if (state == MLX5_PORT_UP)
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ else
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
+ mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3101,7 +3120,7 @@ int mlx5e_open(struct net_device *netdev)
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
if (!err)
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
return err;
@@ -3135,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev)
return -ENODEV;
mutex_lock(&priv->state_lock);
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -5182,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
@@ -5390,6 +5409,8 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ cancel_work_sync(&priv->update_stats_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 006807e04eda..9519a61bd8ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -936,6 +936,7 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
+ mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cc8412151ca0..fcedb5bdca9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1116ab9bea6c..43005caff09e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1608,7 +1608,7 @@ abort:
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
-
+ esw_destroy_tsar(esw);
return err;
}
@@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_disable(esw);
- esw_destroy_tsar(esw);
-
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ esw_destroy_tsar(esw);
+
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
@@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+ int other_vport = 1;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
if (IS_ERR(evport))
return PTR_ERR(evport);
+ if (vport == MLX5_VPORT_UPLINK) {
+ opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+ other_vport = 0;
+ vport = 0;
+ }
mutex_lock(&esw->state_lock);
- err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport, 1, link_state);
+ err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
- mlx5_core_warn(esw->dev,
- "Failed to set vport %d link state, err = %d",
- vport, err);
+ mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
+ vport, opmod, err);
goto unlock;
}
@@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
- if (!ESW_ALLOWED(esw))
- return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vlan > 4095 || qos > 7)
@@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u8 set_flags = 0;
int err;
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a5175e98c0b3..5785596f13f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 060354bb211a..ed75353c56b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
+static void
+mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+ attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
-
- if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr->in_rep->vport == MLX5_VPORT_UPLINK)
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 13e2fb79c21a..2569bb6228b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
return ft;
}
-/* If reverse if false then return the first flow table in next priority of
+/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
@@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
-static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
- struct mlx5_flow_namespace *ns)
-{
- struct mlx5_flow_namespace *root_ns = &root->ns;
- struct fs_prio *iter_prio;
- struct fs_prio *prio;
-
- fs_get_obj(prio, ns->node.parent);
- list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
- if (iter_prio == prio &&
- !list_is_last(&prio->node.children, &iter_prio->node.list))
- return list_next_entry(iter_prio, node.list);
- }
- return NULL;
-}
-
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
struct mlx5_flow_act *flow_act)
{
- struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct fs_prio *prio;
+ bool next_ns;
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
- prio = find_fwd_ns_prio(root, ft->ns);
- else
- fs_get_obj(prio, ft->node.parent);
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
- return (prio) ? find_next_chained_ft(prio) : NULL;
+ return find_next_chained_ft(prio);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ef0706d15a5b..2d55b7c22c03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+
if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
- pin = rq->extts.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
- pin = rq->perout.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
+enum {
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ switch (func) {
+ case PTP_PF_NONE:
+ return 0;
+ case PTP_PF_EXTTS:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+ case PTP_PF_PEROUT:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.verify = NULL,
};
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+ u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+ struct mlx5_core_dev *mdev = clock->mdev;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+ u8 mode;
+ int err;
+
+ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+ if (err || !MLX5_GET(mtpps_reg, out, enable))
+ return PTP_PF_NONE;
+
+ mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+ if (mode == MLX5_PIN_MODE_IN)
+ return PTP_PF_EXTTS;
+ else if (mode == MLX5_PIN_MODE_OUT)
+ return PTP_PF_PEROUT;
+
+ return PTP_PF_NONE;
+}
+
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
- clock->ptp_info.pin_config[i].chan = i;
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
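
The pin-function check added to mlx5_ptp_verify() above only has to answer "does this pin support this function" from a per-pin capability bitmask. A minimal standalone sketch of that check, with local CAP_* flags and a pin_caps array standing in for the firmware-reported MTPPS fields, looks like:

#include <stdio.h>

#define CAP_PPS_IN	(1u << 0)
#define CAP_PPS_OUT	(1u << 1)

enum pin_func { PF_NONE, PF_EXTTS, PF_PEROUT };

static int verify_pin(const unsigned int *pin_caps, unsigned int pin,
		      enum pin_func func)
{
	switch (func) {
	case PF_NONE:
		return 0;
	case PF_EXTTS:
		/* External timestamping needs the PPS-in capability bit. */
		return (pin_caps[pin] & CAP_PPS_IN) ? 0 : -1;
	case PF_PEROUT:
		/* Periodic output needs the PPS-out capability bit. */
		return (pin_caps[pin] & CAP_PPS_OUT) ? 0 : -1;
	default:
		return -1;
	}
}

int main(void)
{
	unsigned int caps[2] = { CAP_PPS_IN, CAP_PPS_IN | CAP_PPS_OUT };

	printf("pin0 as PEROUT: %d\n", verify_pin(caps, 0, PF_PEROUT)); /* rejected */
	printf("pin1 as PEROUT: %d\n", verify_pin(caps, 1, PF_PEROUT)); /* allowed */
	return 0;
}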
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e9ccd333f61d..71b6185b4904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -710,7 +710,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
if (err)
- return err;
+ goto err_trap_register;
err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)
@@ -722,6 +722,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
}
@@ -1813,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
- kfree(trans);
+ kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -2050,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
- rcu_read_unlock();
- if (!found)
+ if (!found) {
+ rcu_read_unlock();
goto drop;
+ }
rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
return;
drop:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 08215fed193d..a7d86df7123f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -45,7 +45,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
static int
mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
u16 offset, u16 size, void *data,
- unsigned int *p_read_size)
+ bool qsfp, unsigned int *p_read_size)
{
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -54,6 +54,10 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
int status;
int err;
+ /* MCIA register accepts buffer size <= 48. A page of size 128 should be
+ * read in chunks of size 48, 48, 32. Align the size of the last chunk
+ * to avoid reading past the end of the page.
+ */
size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
@@ -63,18 +67,25 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
- page = MLXSW_REG_MCIA_PAGE_GET(offset);
- offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
- /* When reading upper pages 1, 2 and 3 the offset starts at
- * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436
- * specification for graphical depiction.
- * MCIA register accepts buffer size <= 48. Page of size 128
- * should be read by chunks of size 48, 48, 32. Align the size
- * of the last chunk to avoid reading after the end of the
- * page.
- */
- if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
- size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+ if (qsfp) {
+ /* When reading upper pages 1, 2 and 3 the offset
+ * starts at 128. Please refer to "QSFP+ Memory Map"
+ * figure in SFF-8436 specification for graphical
+ * depiction.
+ */
+ page = MLXSW_REG_MCIA_PAGE_GET(offset);
+ offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
+ if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+ } else {
+ /* When reading upper pages 1, 2 and 3 the offset
+ * starts at 0 and I2C high address is used. Please refer
+ * to the "Memory Organization" figure in SFF-8472
+ * specification for graphical depiction.
+ */
+ i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
+ offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ }
}
mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
@@ -166,7 +177,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
int err;
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
- module_info, &read_size);
+ module_info, false, &read_size);
if (err)
return err;
@@ -197,7 +208,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
/* Verify if transceiver provides diagnostic monitoring page */
err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
SFP_DIAGMON, 1, &diag_mon,
- &read_size);
+ false, &read_size);
if (err)
return err;
@@ -225,17 +236,22 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
int offset = ee->offset;
unsigned int read_size;
int i = 0;
+ bool qsfp;
int err;
if (!ee->len)
return -EINVAL;
memset(data, 0, ee->len);
+ /* Validate module identifier value. */
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp);
+ if (err)
+ return err;
while (i < ee->len) {
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
ee->len - i, data + i,
- &read_size);
+ qsfp, &read_size);
if (err) {
netdev_err(netdev, "Eeprom query failed\n");
return err;
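
The chunking rule described in the comment above is plain arithmetic; a minimal userspace sketch, with local MCIA_MAX_CHUNK and PAGE_LEN constants rather than the kernel symbols, reproduces the 48 + 48 + 32 split:

#include <stdio.h>

#define MCIA_MAX_CHUNK	48
#define PAGE_LEN	128

int main(void)
{
	unsigned int offset = 0, left = PAGE_LEN, size;

	while (left) {
		size = left < MCIA_MAX_CHUNK ? left : MCIA_MAX_CHUNK;
		/* Never let a chunk cross the page boundary (mirrors the driver's clamp). */
		if (offset + size > PAGE_LEN)
			size = PAGE_LEN - offset;
		printf("read %u bytes at offset %u\n", size, offset);
		offset += size;
		left -= size;
	}
	return 0;
}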
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index fd0e97de44e7..c04ec1a92826 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
u16 num_pages;
int err;
- mutex_init(&mlxsw_pci->cmd.lock);
- init_waitqueue_head(&mlxsw_pci->cmd.wait);
-
mlxsw_pci->core = mlxsw_core;
mbox = mlxsw_cmd_mbox_alloc();
if (!mbox)
return -ENOMEM;
- err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
- if (err)
- goto mbox_put;
-
- err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
- if (err)
- goto err_out_mbox_alloc;
-
err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
if (err)
goto err_sw_reset;
@@ -1537,9 +1526,6 @@ err_query_fw:
mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-err_out_mbox_alloc:
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
@@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv)
mlxsw_pci_aqs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
static struct mlxsw_pci_queue *
@@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto err_in_mbox_alloc;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ return 0;
+
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+ return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
+ err = mlxsw_pci_cmd_init(mlxsw_pci);
+ if (err)
+ goto err_pci_cmd_init;
+
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
+ mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
@@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_cmd_fini(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
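
The new mlxsw_pci_cmd_init()/mlxsw_pci_cmd_fini() pair follows the usual acquire-in-order, release-in-reverse pattern with a goto unwind on the error path. A minimal sketch of that structure, using illustrative names and plain malloc/free instead of the mailbox helpers, is:

#include <stdlib.h>

struct cmd {
	void *in_mbox;
	void *out_mbox;
};

static int cmd_init(struct cmd *cmd)
{
	cmd->in_mbox = malloc(4096);
	if (!cmd->in_mbox)
		goto err_in_mbox;

	cmd->out_mbox = malloc(4096);
	if (!cmd->out_mbox)
		goto err_out_mbox;

	return 0;

err_out_mbox:
	/* Undo only what was already acquired, newest first. */
	free(cmd->in_mbox);
err_in_mbox:
	return -1;
}

static void cmd_fini(struct cmd *cmd)
{
	/* Release in reverse order of acquisition. */
	free(cmd->out_mbox);
	free(cmd->in_mbox);
}

int main(void)
{
	struct cmd cmd;

	if (cmd_init(&cmd))
		return 1;
	cmd_fini(&cmd);
	return 0;
}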
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index fcb88d4271bf..8ac987c8c8bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 770de0222e7b..0521e9d48c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
- /* Packets with link-local destination IP arriving to the router
- * are trapped to the CPU, so no need to program specific routes
- * for them. Only allow prefix routes (usually one fe80::/64) so
- * that packets are trapped for the right reason.
- */
- if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
- (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
- return true;
-
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
@@ -6262,7 +6253,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
}
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (WARN_ON(!fib_work))
+ if (!fib_work)
return NOTIFY_BAD;
fib_work->mlxsw_sp = router->mlxsw_sp;
@@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
- router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
- err = register_inetaddr_notifier(&router->inetaddr_nb);
- if (err)
- goto err_register_inetaddr_notifier;
-
- router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
- err = register_inet6addr_notifier(&router->inet6addr_nb);
- if (err)
- goto err_register_inet6addr_notifier;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_init;
- mlxsw_sp->router->netevent_nb.notifier_call =
- mlxsw_sp_router_netevent_event;
- err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
err = mlxsw_sp_mp_hash_init(mlxsw_sp);
if (err)
goto err_mp_hash_init;
@@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8174,10 +8170,6 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
- unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 157a42c63066..1e38dfe7cf64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
{
.policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
},
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+ },
};
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -422,6 +425,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.priority = 2,
},
{
+ .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+ .priority = 1,
+ },
+ {
.group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
.priority = 2,
@@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
},
{
- .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 9cfe1fd98c30..f17da67a4622 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
if (unlikely(!skb_match))
continue;
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb_match, &shhwtstamps);
dev_kfree_skb_any(skb_match);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 9b63574b6202..b5f1849fd280 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -98,7 +98,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
struct sk_buff **skb_ptr = NULL;
struct sk_buff **temp;
-#define NR_SKB_COMPLETED 128
+#define NR_SKB_COMPLETED 16
struct sk_buff *completed[NR_SKB_COMPLETED];
int more;
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index d2708a57f2ff..4075f5e59955 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
err = nixge_of_get_resources(pdev);
if (err)
- return err;
+ goto free_netdev;
__nixge_hw_set_mac_address(ndev);
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
if (priv->tx_irq < 0) {
netdev_err(ndev, "could not find 'tx' irq");
- return priv->tx_irq;
+ err = priv->tx_irq;
+ goto free_netdev;
}
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
if (priv->rx_irq < 0) {
netdev_err(ndev, "could not find 'rx' irq");
- return priv->rx_irq;
+ err = priv->rx_irq;
+ goto free_netdev;
}
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index e03ea9b18f95..095561924bdc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -103,15 +103,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
- memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
+ offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
}
static int ionic_get_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index f49486b6d04d..e55d41546cff 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -96,8 +96,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
u16 link_status;
bool link_up;
- if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
- test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
+ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
link_status = le16_to_cpu(lif->info->status.link_status);
@@ -114,16 +113,22 @@ static void ionic_link_status_check(struct ionic_lif *lif)
netif_carrier_on(netdev);
}
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
+ if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ mutex_lock(&lif->queue_lock);
ionic_start_queues(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
} else {
if (netif_carrier_ok(netdev)) {
netdev_info(netdev, "Link down\n");
netif_carrier_off(netdev);
}
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
+ if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
}
clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
@@ -863,8 +868,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
if (f)
return 0;
- netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
- ctx.comp.rx_filter_add.filter_id);
+ netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
err = ionic_adminq_post_wait(lif, &ctx);
@@ -893,6 +897,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return -ENOENT;
}
+ netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
+ addr, f->filter_id);
+
ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);
spin_unlock_bh(&lif->rx_filters.lock);
@@ -901,9 +908,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
if (err && err != -EEXIST)
return err;
- netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
- ctx.cmd.rx_filter_del.filter_id);
-
return 0;
}
@@ -1351,13 +1355,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
};
int err;
+ netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
- netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
- ctx.comp.rx_filter_add.filter_id);
-
return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}
@@ -1382,8 +1384,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
return -ENOENT;
}
- netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
- le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
+ netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
+ vid, f->filter_id);
ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);
@@ -1993,10 +1995,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
bool running;
int err = 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
- if (err)
- return err;
-
+ mutex_lock(&lif->queue_lock);
running = netif_running(lif->netdev);
if (running) {
netif_device_detach(lif->netdev);
@@ -2014,7 +2013,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
}
reset_out:
- clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
+ mutex_unlock(&lif->queue_lock);
return err;
}
@@ -2161,7 +2160,9 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
+ mutex_unlock(&lif->queue_lock);
}
if (netif_running(lif->netdev)) {
@@ -2280,15 +2281,15 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
cancel_work_sync(&lif->deferred.work);
cancel_work_sync(&lif->tx_timeout_work);
ionic_rx_filters_deinit(lif);
+ if (lif->netdev->features & NETIF_F_RXHASH)
+ ionic_lif_rss_deinit(lif);
}
- if (lif->netdev->features & NETIF_F_RXHASH)
- ionic_lif_rss_deinit(lif);
-
napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -2465,6 +2466,7 @@ static int ionic_lif_init(struct ionic_lif *lif)
return err;
lif->hw_index = le16_to_cpu(comp.hw_index);
+ mutex_init(&lif->queue_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
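
The queue_lock initialized above replaces the old bit-based reset flag with straight mutual exclusion between the link-check stop/start path and queue reset. A minimal pthread sketch of that serialization, with illustrative names rather than the driver's API, is:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queues_running;

static void stop_queues(void)
{
	pthread_mutex_lock(&queue_lock);
	queues_running = 0;
	pthread_mutex_unlock(&queue_lock);
}

static void reset_queues(void)
{
	pthread_mutex_lock(&queue_lock);
	/* Reconfigure while no other path can start or stop the queues. */
	queues_running = 1;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	stop_queues();
	reset_queues();
	printf("running=%d\n", queues_running);
	return 0;
}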
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index ed126dd74e01..8dc2c5d77424 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -135,7 +135,6 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_SW_DEBUG_STATS,
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
- IONIC_LIF_F_QUEUE_RESET,
IONIC_LIF_F_FW_RESET,
/* leave this as last */
@@ -165,6 +164,7 @@ struct ionic_lif {
unsigned int hw_index;
unsigned int kern_pid;
u64 __iomem *kern_dbpage;
+ struct mutex queue_lock; /* lock for queue structures */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
@@ -213,12 +213,6 @@ struct ionic_lif {
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
-/* return 0 if successfully set the bit, else non-zero */
-static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
-{
- return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
-}
-
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 80eeb7696e01..cd0076fc3044 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -21,13 +21,16 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
struct ionic_rx_filter_add_cmd *ac;
+ struct hlist_head new_id_list;
struct ionic_admin_ctx ctx;
struct ionic_rx_filter *f;
struct hlist_head *head;
struct hlist_node *tmp;
+ unsigned int key;
unsigned int i;
int err;
+ INIT_HLIST_HEAD(&new_id_list);
ac = &ctx.cmd.rx_filter_add;
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
@@ -58,9 +61,30 @@ void ionic_rx_filter_replay(struct ionic_lif *lif)
ac->mac.addr);
break;
}
+ spin_lock_bh(&lif->rx_filters.lock);
+ ionic_rx_filter_free(lif, f);
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ continue;
}
+
+ /* remove from old id list, save new id in tmp list */
+ spin_lock_bh(&lif->rx_filters.lock);
+ hlist_del(&f->by_id);
+ spin_unlock_bh(&lif->rx_filters.lock);
+ f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
+ hlist_add_head(&f->by_id, &new_id_list);
}
}
+
+ /* rebuild the by_id hash lists with the new filter ids */
+ spin_lock_bh(&lif->rx_filters.lock);
+ hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
+ key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
+ head = &lif->rx_filters.by_id[key];
+ hlist_add_head(&f->by_id, head);
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
}
int ionic_rx_filters_init(struct ionic_lif *lif)
@@ -69,10 +93,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif)
spin_lock_init(&lif->rx_filters.lock);
+ spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
}
+ spin_unlock_bh(&lif->rx_filters.lock);
return 0;
}
@@ -84,11 +110,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif)
struct hlist_node *tmp;
unsigned int i;
+ spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
head = &lif->rx_filters.by_id[i];
hlist_for_each_entry_safe(f, tmp, head, by_id)
ionic_rx_filter_free(lif, f);
}
+ spin_unlock_bh(&lif->rx_filters.lock);
}
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
@@ -124,6 +152,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
f->rxq_index = rxq_index;
memcpy(&f->cmd, ac, sizeof(f->cmd));
+ netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);
INIT_HLIST_NODE(&f->by_hash);
INIT_HLIST_NODE(&f->by_id);
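
The replay path above re-keys every filter by its new firmware-assigned id: entries are parked on a temporary list while the id changes, then re-bucketed. A minimal sketch of that park-then-re-bucket step, using simple arrays and pointers instead of the hlist helpers, is:

#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 4
#define MASK (NBUCKETS - 1)

struct filter {
	unsigned int id;
	struct filter *next;
};

int main(void)
{
	struct filter f[3] = { { 7, NULL }, { 12, NULL }, { 33, NULL } };
	struct filter *buckets[NBUCKETS] = { NULL };
	struct filter *tmp = NULL, *cur, *n;
	unsigned int new_id = 100;
	int i;

	/* Park every filter on a temporary list and give it its new id. */
	for (i = 0; i < 3; i++) {
		f[i].id = new_id++;
		f[i].next = tmp;
		tmp = &f[i];
	}

	/* Rebuild the by-id buckets from the new ids. */
	for (cur = tmp; cur; cur = n) {
		n = cur->next;
		cur->next = buckets[cur->id & MASK];
		buckets[cur->id & MASK] = cur;
	}

	for (i = 0; i < NBUCKETS; i++)
		for (cur = buckets[i]; cur; cur = cur->next)
			printf("bucket %d: id %u\n", i, cur->id);
	return 0;
}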
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index b7f900c11834..85eb8f276a37 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -161,12 +161,6 @@ static void ionic_rx_clean(struct ionic_queue *q,
return;
}
- /* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
- stats->dropped++;
- return;
- }
-
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 08ba9d54ab63..d13ec88313c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2008,8 +2008,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
enum protocol_type proto;
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
- DP_NOTICE(p_hwfn,
- "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9c26fde663b3..dbdac983ccde 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3102,7 +3102,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
/* Log and clear previous pglue_b errors if such exist */
- qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
/* Enable the PF's internal FID_enable in the PXP */
rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b7b974f0ef21..5eec1fc6229d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -257,9 +257,10 @@ out:
#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
-int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool hw_init)
{
+ char msg[256];
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
@@ -273,22 +274,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_NOTICE(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked.\n"
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
- "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
- GET_FIELD(details,
- PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
- tmp,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ snprintf(msg, sizeof(msg),
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
+ tmp,
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));
+
+ if (hw_init)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+ else
+ DP_NOTICE(p_hwfn, "%s\n", msg);
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@@ -321,8 +323,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
- if (tmp & PGLUE_ATTENTION_ICPL_VALID)
- DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
+ if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
+ snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);
+
+ if (hw_init)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+ else
+ DP_NOTICE(p_hwfn, "%s\n", msg);
+ }
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
@@ -361,7 +369,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
- return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+ return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
@@ -1193,7 +1201,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn, "MFW indication via attention\n");
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index e09db3386367..110169e90121 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -442,7 +442,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
-int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt);
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool hw_init);
#endif
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index a442bcf64b9c..99f7aae102ce 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1450,6 +1450,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct ravb_private *priv = container_of(work, struct ravb_private,
work);
struct net_device *ndev = priv->ndev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -1458,15 +1459,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
- ravb_stop_dma(ndev);
+ if (ravb_stop_dma(ndev)) {
+ /* If ravb_stop_dma() fails, the hardware is still operating
+ * for TX and/or RX. So, this should not call the following
+ * functions because ravb_dmac_init() could fail too.
+ * Also, this should not retry ravb_stop_dma() again and again
+ * here because it could wait forever. So, this just
+ * re-enables the TX and RX and skips the following
+ * re-initialization procedure.
+ */
+ ravb_rcv_snd_enable(ndev);
+ goto out;
+ }
ravb_ring_free(ndev, RAVB_BE);
ravb_ring_free(ndev, RAVB_NC);
/* Device init */
- ravb_dmac_init(ndev);
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ /* If ravb_dmac_init() fails, descriptors are freed. So, this
+ * should return here to avoid re-enabling the TX and RX in
+ * ravb_emac_init().
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return;
+ }
ravb_emac_init(ndev);
+out:
/* Initialise PTP Clock driver */
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_init(ndev, priv->pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 90410f9d3b1a..1c4fea9c3ec4 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
"power", 0, 0, 100);
if (ret)
- return ret;
+ goto out_free_netdev;
/*
* Optional reset GPIO configured? Minimum 100 ns reset needed
@@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
"reset", 0, 0, 100);
if (ret)
- return ret;
+ goto out_free_netdev;
/*
* Need to wait for optional EEPROM to load, max 750 us according
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f2638446b62e..81b554dd7221 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev)
ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
priv->pinmode_mask, priv->pinmode_val);
if (ret)
- return ret;
+ goto out_reset_assert;
ave_global_reset(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1492648247d9..6d778bc3d012 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1850,7 +1850,8 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
port->ndev->hw_features = NETIF_F_SG |
NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM;
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_TC;
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
port->ndev->vlan_features |= NETIF_F_SG;