author     David S. Miller <davem@davemloft.net>  2020-09-22 16:45:34 -0700
committer  David S. Miller <davem@davemloft.net>  2020-09-22 16:45:34 -0700
commit     3ab0a7a0c349a1d7beb2bb371a62669d1528269d (patch)
tree       d2ae17c3bfc829ce0c747ad97021cd4bc8fb11dc /drivers/net/ethernet
parent     92ec804f3dbf0d986f8e10850bfff14f316d7aaf (diff)
parent     805c6d3c19210c90c109107d189744e960eae025 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Two minor conflicts:

1) net/ipv4/route.c, adding a new local variable while moving another
   local variable and removing its initial assignment.

2) drivers/net/dsa/microchip/ksz9477.c, overlapping changes. One pretty
   prints the port mode differently, whilst another changes the driver to
   try and obtain the port mode from the port node rather than the switch
   node.

Signed-off-by: David S. Miller <davem@davemloft.net>
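For readers who want a concrete picture of the first conflict, the sketch below shows the general shape of such a resolution. All identifiers are invented for illustration and this is not the real net/ipv4/route.c code: one branch introduces a new local variable, the other moves an existing local and drops its initializer, and the merged result keeps both edits.

/*
 * Hypothetical sketch of the first conflict's resolved shape; the
 * identifiers are invented and none of this is net/ipv4/route.c.
 */
#include <stdio.h>

static unsigned int initial_value(void)
{
	return 1500;
}

static void resolved_example(void)
{
	int added_by_one_branch = 0;	/* new local introduced by one side of the merge */
	unsigned int moved_local;	/* the other side moved this declaration and
					 * dropped its "= initial_value()" initializer */

	moved_local = initial_value();	/* the initial assignment now happens later */
	printf("%d %u\n", added_by_one_branch, moved_local);
}

int main(void)
{
	resolved_example();
	return 0;
}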
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 43
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 20
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 24
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 21
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 24
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 20
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 19
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 8
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 24
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 12
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 249
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 11
-rw-r--r--  drivers/net/ethernet/sfc/ef100.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 53
54 files changed, 664 insertions, 428 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 53f64ca673c3..65c298f1f333 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3782,6 +3782,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
return -EOPNOTSUPP;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -3852,7 +3853,7 @@ static void bnxt_init_stats(struct bnxt *bp)
tx_masks = stats->hw_masks;
tx_count = sizeof(struct tx_port_stats_ext) / 8;
- flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
rc = bnxt_hwrm_port_qstats_ext(bp, flags);
if (rc) {
mask = (1ULL << 40) - 1;
@@ -4305,7 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (BNXT_NO_FW_ACCESS(bp))
return -EBUSY;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
@@ -5723,7 +5724,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code;
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (BNXT_NO_FW_ACCESS(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
@@ -7817,7 +7818,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
if (set_tpa)
tpa_flags = bp->flags & BNXT_FLAG_TPA;
- else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
@@ -9310,18 +9311,16 @@ static ssize_t bnxt_show_temp(struct device *dev,
struct hwrm_temp_monitor_query_output *resp;
struct bnxt *bp = dev_get_drvdata(dev);
u32 len = 0;
+ int rc;
resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (len)
- return len;
-
- return sprintf(buf, "unknown\n");
+ return rc ?: len;
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
@@ -9341,7 +9340,16 @@ static void bnxt_hwmon_close(struct bnxt *bp)
static void bnxt_hwmon_open(struct bnxt *bp)
{
+ struct hwrm_temp_monitor_query_input req = {0};
struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_close(bp);
+ return;
+ }
if (bp->hwmon_dev)
return;
@@ -11778,6 +11786,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_cancel_sp_work(bp);
+ bp->sp_event = 0;
+
bnxt_dl_fw_reporters_destroy(bp, true);
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
@@ -11785,9 +11797,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
@@ -12088,7 +12097,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
static void bnxt_vpd_read_info(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- int i, len, pos, ro_size;
+ int i, len, pos, ro_size, size;
ssize_t vpd_size;
u8 *vpd_data;
@@ -12123,7 +12132,8 @@ static void bnxt_vpd_read_info(struct bnxt *bp)
if (len + pos > vpd_size)
goto read_sn;
- strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+ size = min(len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn:
pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
@@ -12136,7 +12146,8 @@ read_sn:
if (len + pos > vpd_size)
goto exit;
- strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+ size = min(len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5a13eb66beda..0ef89dabfd61 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1737,6 +1737,10 @@ struct bnxt {
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_NO_FW_ACCESS(bp) \
+ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
+ pci_channel_offline((bp)->pdev))
+
struct bnxt_irq *irq_tbl;
int total_irqs;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 5a65f28ef771..6a3453f46d9a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1322,6 +1322,9 @@ static int bnxt_get_regs_len(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int reg_len;
+ if (!BNXT_PF(bp))
+ return -EOPNOTSUPP;
+
reg_len = BNXT_PXP_REG_LEN;
if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
@@ -1804,9 +1807,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
if (epause->autoneg) {
- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
- return -EINVAL;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ rc = -EINVAL;
+ goto pause_exit;
+ }
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
if (bp->hwrm_spec_code >= 0x10201)
@@ -1827,11 +1833,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- if (netif_running(dev)) {
- mutex_lock(&bp->link_lock);
+ if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
- mutex_unlock(&bp->link_lock);
- }
+
+pause_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -2568,8 +2574,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
struct bnxt *bp = netdev_priv(dev);
struct ethtool_eee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -2578,19 +2583,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
+ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
if (!edata->eee_enabled)
goto eee_ok;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
netdev_warn(dev, "EEE requires autoneg\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
if (edata->tx_lpi_enabled) {
if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
bp->lpi_tmr_lo, bp->lpi_tmr_hi);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
} else if (!bp->lpi_tmr_hi) {
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
@@ -2600,7 +2609,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
} else if (edata->advertised & ~advertising) {
netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
edata->advertised, advertising);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
eee->advertised = edata->advertised;
@@ -2612,6 +2622,8 @@ eee_ok:
if (netif_running(dev))
rc = bnxt_hwrm_set_link_setting(bp, false, true);
+eee_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 830c537bc08c..9c8f40e8a721 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -647,8 +647,7 @@ static void macb_mac_link_up(struct phylink_config *config,
ctrl |= GEM_BIT(GBE);
}
- /* We do not support MLO_PAUSE_RX yet */
- if (tx_pause)
+ if (rx_pause)
ctrl |= MACB_BIT(PAE);
macb_set_tx_clk(bp->tx_clk, speed, ndev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index f6c1ec140e09..6ec5f2f26f05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1908,13 +1908,16 @@ out:
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
struct filter_entry *f)
{
- if (f->fs.hitcnts)
+ if (f->fs.hitcnts) {
set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
- TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
+ TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
+ TCB_TIMESTAMP_V(0ULL),
+ 1);
+ set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
- TCB_TIMESTAMP_V(0ULL) |
TCB_RTT_TS_RECENT_AGE_V(0ULL),
1);
+ }
if (f->fs.newdmac)
set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
index b1a073eea60b..a020e8490681 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap)
{
struct mps_entries_ref *mps_entry, *tmp;
- if (!list_empty(&adap->mps_ref))
+ if (list_empty(&adap->mps_ref))
return;
spin_lock(&adap->mps_ref_lock);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index e648724c2c36..d9f6c19940ef 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
#define DSL CONFIG_DE2104X_DSL
#endif
-#define DE_RX_RING_SIZE 64
+#define DE_RX_RING_SIZE 128
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index 3ea51dd9374b..a24b20f76938 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -66,8 +66,8 @@ struct dpmac_cmd_get_counter {
};
struct dpmac_rsp_get_counter {
- u64 pad;
- u64 counter;
+ __le64 pad;
+ __le64 counter;
};
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 26d5981b798f..177334f0adb1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1053,7 +1053,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
err_reg_netdev:
enetc_teardown_serdes(priv);
- enetc_mdio_remove(pf);
enetc_free_msix(priv);
err_alloc_msix:
enetc_free_si_resources(priv);
@@ -1061,6 +1060,7 @@ err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
+ enetc_mdio_remove(pf);
enetc_of_put_phy(pf);
err_map_pf_space:
enetc_pci_remove(pdev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index ed3829ae4ef1..a769273b36f7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -334,7 +334,7 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @enable: false - request reset , true - drop reset
+ * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
@@ -357,7 +357,7 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @enable: false - request reset , true - drop reset
+ * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 4eb50296f653..14e60c9e491d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -463,8 +463,8 @@ static int __lb_clean_rings(struct hns_nic_priv *priv,
/**
* nic_run_loopback_test - run loopback test
- * @nic_dev: net device
- * @loopback_type: loopback type
+ * @ndev: net device
+ * @loop_mode: loopback mode
*/
static int __lb_run_test(struct net_device *ndev,
enum hnae_loop loop_mode)
@@ -572,7 +572,7 @@ static int __lb_down(struct net_device *ndev, enum hnae_loop loop)
/**
* hns_nic_self_test - self test
- * @dev: net device
+ * @ndev: net device
* @eth_test: test cmd
* @data: test result
*/
@@ -633,7 +633,7 @@ static void hns_nic_self_test(struct net_device *ndev,
/**
* hns_nic_get_drvinfo - get net driver info
- * @dev: net device
+ * @net_dev: net device
* @drvinfo: driver info
*/
static void hns_nic_get_drvinfo(struct net_device *net_dev,
@@ -658,7 +658,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
/**
* hns_get_ringparam - get ring parameter
- * @dev: net device
+ * @net_dev: net device
* @param: ethtool parameter
*/
static void hns_get_ringparam(struct net_device *net_dev,
@@ -683,7 +683,7 @@ static void hns_get_ringparam(struct net_device *net_dev,
/**
* hns_get_pauseparam - get pause parameter
- * @dev: net device
+ * @net_dev: net device
* @param: pause parameter
*/
static void hns_get_pauseparam(struct net_device *net_dev,
@@ -701,7 +701,7 @@ static void hns_get_pauseparam(struct net_device *net_dev,
/**
* hns_set_pauseparam - set pause parameter
- * @dev: net device
+ * @net_dev: net device
* @param: pause parameter
*
* Return 0 on success, negative on failure
@@ -725,7 +725,7 @@ static int hns_set_pauseparam(struct net_device *net_dev,
/**
* hns_get_coalesce - get coalesce info.
- * @dev: net device
+ * @net_dev: net device
* @ec: coalesce info.
*
* Return 0 on success, negative on failure.
@@ -769,7 +769,7 @@ static int hns_get_coalesce(struct net_device *net_dev,
/**
* hns_set_coalesce - set coalesce info.
- * @dev: net device
+ * @net_dev: net device
* @ec: coalesce info.
*
* Return 0 on success, negative on failure.
@@ -808,7 +808,7 @@ static int hns_set_coalesce(struct net_device *net_dev,
/**
* hns_get_channels - get channel info.
- * @dev: net device
+ * @net_dev: net device
* @ch: channel info.
*/
static void
@@ -825,7 +825,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
/**
* get_ethtool_stats - get detail statistics.
- * @dev: net device
+ * @netdev: net device
* @stats: statistics info.
* @data: statistics data.
*/
@@ -883,8 +883,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
/**
* get_strings: Return a set of strings that describe the requested objects
- * @dev: net device
- * @stats: string set ID.
+ * @netdev: net device
+ * @stringset: string set ID.
* @data: objects data.
*/
static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
@@ -972,7 +972,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
/**
* nic_get_sset_count - get string set count witch returned by nic_get_strings.
- * @dev: net device
+ * @netdev: net device
* @stringset: string set index, 0: self test string; 1: statistics string.
*
* Return string set count.
@@ -1006,7 +1006,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset)
/**
* hns_phy_led_set - set phy LED status.
- * @dev: net device
+ * @netdev: net device
* @value: LED state.
*
* Return 0 on success, negative on failure.
@@ -1028,7 +1028,7 @@ static int hns_phy_led_set(struct net_device *netdev, int value)
/**
* nic_set_phys_id - set phy identify LED.
- * @dev: net device
+ * @netdev: net device
* @state: LED state.
*
* Return 0 on success, negative on failure.
@@ -1104,9 +1104,9 @@ hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
/**
* hns_get_regs - get net device register
- * @dev: net device
+ * @net_dev: net device
* @cmd: ethtool cmd
- * @date: register data
+ * @data: register data
*/
static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
void *data)
@@ -1126,7 +1126,7 @@ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
/**
* nic_get_regs_len - get total register len.
- * @dev: net device
+ * @net_dev: net device
*
* Return total register len.
*/
@@ -1151,7 +1151,7 @@ static int hns_get_regs_len(struct net_device *net_dev)
/**
* hns_nic_nway_reset - nway reset
- * @dev: net device
+ * @netdev: net device
*
* Return 0 on success, negative on failure
*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 6bb65ade1d77..c340d9acba80 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1654,6 +1654,7 @@ static void hinic_diag_test(struct net_device *netdev,
}
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
&test_index);
@@ -1662,9 +1663,12 @@ static void hinic_diag_test(struct net_device *netdev,
data[test_index] = 1;
}
+ netif_tx_wake_all_queues(netdev);
+
err = hinic_port_link_state(nic_dev, &link_state);
if (!err && link_state == HINIC_LINK_STATE_UP)
netif_carrier_on(netdev);
+
}
static int hinic_set_phys_id(struct net_device *netdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index c6ce5966284c..2ebae6cb5db5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -47,8 +47,12 @@
#define MGMT_MSG_TIMEOUT 5000
+#define SET_FUNC_PORT_MBOX_TIMEOUT 30000
+
#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
+#define UPDATE_FW_MGMT_TIMEOUT 20000
+
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -361,16 +365,22 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
- if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
- timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+ if (HINIC_IS_VF(hwif)) {
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MBOX_TIMEOUT;
- if (HINIC_IS_VF(hwif))
return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
- in_size, buf_out, out_size, 0);
- else
+ in_size, buf_out, out_size, timeout);
+ } else {
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+ else if (cmd == HINIC_PORT_CMD_UPDATE_FW)
+ timeout = UPDATE_FW_MGMT_TIMEOUT;
+
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
MSG_NOT_RESP, timeout);
+ }
}
static void recv_mgmt_msg_work_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 2c63e3a690cd..19d01def891f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -191,6 +191,24 @@ err_init_txq:
return err;
}
+static void enable_txqs_napi(struct hinic_dev *nic_dev)
+{
+ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ int i;
+
+ for (i = 0; i < num_txqs; i++)
+ napi_enable(&nic_dev->txqs[i].napi);
+}
+
+static void disable_txqs_napi(struct hinic_dev *nic_dev)
+{
+ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ int i;
+
+ for (i = 0; i < num_txqs; i++)
+ napi_disable(&nic_dev->txqs[i].napi);
+}
+
/**
* free_txqs - Free the Logical Tx Queues of specific NIC device
* @nic_dev: the specific NIC device
@@ -440,6 +458,8 @@ int hinic_open(struct net_device *netdev)
goto err_create_txqs;
}
+ enable_txqs_napi(nic_dev);
+
err = create_rxqs(nic_dev);
if (err) {
netif_err(nic_dev, drv, netdev,
@@ -524,6 +544,7 @@ err_port_state:
}
err_create_rxqs:
+ disable_txqs_napi(nic_dev);
free_txqs(nic_dev);
err_create_txqs:
@@ -537,6 +558,9 @@ int hinic_close(struct net_device *netdev)
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
+ /* Disable txq napi firstly to aviod rewaking txq in free_tx_poll */
+ disable_txqs_napi(nic_dev);
+
down(&nic_dev->mgmt_lock);
flags = nic_dev->flags;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index f403a6711e97..070a7cc6392e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -543,18 +543,25 @@ static int rx_request_irq(struct hinic_rxq *rxq)
if (err) {
netif_err(nic_dev, drv, rxq->netdev,
"Failed to set RX interrupt coalescing attribute\n");
- rx_del_napi(rxq);
- return err;
+ goto err_req_irq;
}
err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
- if (err) {
- rx_del_napi(rxq);
- return err;
- }
+ if (err)
+ goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ if (err)
+ goto err_irq_affinity;
+
+ return 0;
+
+err_irq_affinity:
+ free_irq(rq->irq, rxq);
+err_req_irq:
+ rx_del_napi(rxq);
+ return err;
}
static void rx_free_irq(struct hinic_rxq *rxq)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c249b7e6e432..8da7d46363b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -746,8 +746,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
__netif_tx_lock(netdev_txq, smp_processor_id());
-
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ if (!netif_testing(nic_dev->netdev))
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
__netif_tx_unlock(netdev_txq);
@@ -774,18 +774,6 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
return budget;
}
-static void tx_napi_add(struct hinic_txq *txq, int weight)
-{
- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
- napi_enable(&txq->napi);
-}
-
-static void tx_napi_del(struct hinic_txq *txq)
-{
- napi_disable(&txq->napi);
- netif_napi_del(&txq->napi);
-}
-
static irqreturn_t tx_irq(int irq, void *data)
{
struct hinic_txq *txq = data;
@@ -819,7 +807,7 @@ static int tx_request_irq(struct hinic_txq *txq)
qp = container_of(sq, struct hinic_qp, sq);
- tx_napi_add(txq, nic_dev->tx_weight);
+ netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
@@ -836,14 +824,14 @@ static int tx_request_irq(struct hinic_txq *txq)
if (err) {
netif_err(nic_dev, drv, txq->netdev,
"Failed to set TX interrupt coalescing attribute\n");
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
return err;
}
err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
if (err) {
dev_err(&pdev->dev, "Failed to request Tx irq\n");
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
return err;
}
@@ -855,7 +843,7 @@ static void tx_free_irq(struct hinic_txq *txq)
struct hinic_sq *sq = txq->sq;
free_irq(sq->irq, txq);
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
}
/**
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6d320be47e60..a151ff37fda2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2019,16 +2019,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
} else {
rc = reset_tx_pools(adapter);
- if (rc)
+ if (rc) {
netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
rc);
goto out;
+ }
rc = reset_rx_pools(adapter);
- if (rc)
+ if (rc) {
netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
rc);
goto out;
+ }
}
ibmvnic_disable_irqs(adapter);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8e133d6545bd..47bfb2e95e2d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1115,7 +1115,7 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
- int num_vlans = 0, bkt;
+ u16 num_vlans = 0, bkt;
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
@@ -1134,8 +1134,8 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
*
* Called to get number of VLANs and VLAN list present in mac_filter_hash.
**/
-static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
- s16 **vlan_list)
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
+ s16 **vlan_list)
{
struct i40e_mac_filter *f;
int i = 0;
@@ -1169,11 +1169,11 @@ err:
**/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
- bool unicast_enable, s16 *vl, int num_vlans)
+ bool unicast_enable, s16 *vl, u16 num_vlans)
{
+ i40e_status aq_ret, aq_tmp = 0;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
int i;
/* No VLAN to set promisc on, set on VSI */
@@ -1222,6 +1222,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
}
aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
@@ -1235,8 +1238,15 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
}
}
+
+ if (aq_tmp)
+ aq_ret = aq_tmp;
+
return aq_ret;
}
@@ -1258,7 +1268,7 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
i40e_status aq_ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi;
- int num_vlans;
+ u16 num_vlans;
s16 *vl;
vsi = i40e_find_vsi_from_id(pf, vsi_id);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 3070dfdb7eb4..2d566f3c827b 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -299,18 +299,14 @@ extern char igc_driver_name[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
-/* FIXME: These values were estimated using the ones that i225 has as
- * basis, they seem to provide good numbers with ptp4l/phc2sys, but we
- * need to confirm them.
- */
-#define IGC_I225_TX_LATENCY_10 9542
-#define IGC_I225_TX_LATENCY_100 1024
-#define IGC_I225_TX_LATENCY_1000 178
-#define IGC_I225_TX_LATENCY_2500 64
-#define IGC_I225_RX_LATENCY_10 20662
-#define IGC_I225_RX_LATENCY_100 2213
-#define IGC_I225_RX_LATENCY_1000 448
-#define IGC_I225_RX_LATENCY_2500 160
+#define IGC_I225_TX_LATENCY_10 240
+#define IGC_I225_TX_LATENCY_100 58
+#define IGC_I225_TX_LATENCY_1000 80
+#define IGC_I225_TX_LATENCY_2500 1325
+#define IGC_I225_RX_LATENCY_10 6450
+#define IGC_I225_RX_LATENCY_100 185
+#define IGC_I225_RX_LATENCY_1000 300
+#define IGC_I225_RX_LATENCY_2500 1485
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 36c999250fcc..6a9b5102aa55 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -364,6 +364,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
struct sk_buff *skb = adapter->ptp_tx_skb;
struct skb_shared_hwtstamps shhwtstamps;
struct igc_hw *hw = &adapter->hw;
+ int adjust = 0;
u64 regval;
if (WARN_ON_ONCE(!skb))
@@ -373,6 +374,24 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_TX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_TX_LATENCY_2500;
+ break;
+ }
+
+ shhwtstamps.hwtstamp =
+ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
+
/* Clear the lock early before calling skb_tstamp_tx so that
* applications are not woken up before the lock bit is clear. We use
* a copy of the skb pointer to ensure other threads can't change it
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 1645e4e7ebdb..635ff3a5dcfb 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
}
if (rx < budget) {
- napi_complete(&ch->napi);
- ltq_dma_enable_irq(&ch->dma);
+ if (napi_complete_done(&ch->napi, rx))
+ ltq_dma_enable_irq(&ch->dma);
}
return rx;
@@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
net_dev->stats.tx_bytes += bytes;
netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
+ if (netif_queue_stopped(net_dev))
+ netif_wake_queue(net_dev);
+
if (pkts < budget) {
- napi_complete(&ch->napi);
- ltq_dma_enable_irq(&ch->dma);
+ if (napi_complete_done(&ch->napi, pkts))
+ ltq_dma_enable_irq(&ch->dma);
}
return pkts;
@@ -342,10 +345,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
struct xrx200_chan *ch = ptr;
- ltq_dma_disable_irq(&ch->dma);
- ltq_dma_ack_irq(&ch->dma);
+ if (napi_schedule_prep(&ch->napi)) {
+ __napi_schedule(&ch->napi);
+ ltq_dma_disable_irq(&ch->dma);
+ }
- napi_schedule(&ch->napi);
+ ltq_dma_ack_irq(&ch->dma);
return IRQ_HANDLED;
}
@@ -499,7 +504,7 @@ static int xrx200_probe(struct platform_device *pdev)
/* setup NAPI */
netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
- netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e5ddefe5cade..14df3aec285d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2027,11 +2027,11 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
- page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
- sync_len, napi);
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), napi);
+ page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
+ sync_len, napi);
}
static int
@@ -2377,8 +2377,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
&size, page);
} else {
- if (unlikely(!xdp_buf.data_hard_start))
+ if (unlikely(!xdp_buf.data_hard_start)) {
+ rx_desc->buf_phys_addr = 0;
+ page_pool_put_full_page(rxq->page_pool, page,
+ true);
continue;
+ }
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
&size, page);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 95aab8b429cf..e9010ceb5ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -601,7 +601,7 @@ struct mlx5e_rq {
struct dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
- struct bpf_prog *xdp_prog;
+ struct bpf_prog __rcu *xdp_prog;
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
@@ -1007,7 +1007,6 @@ int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 8fe8b4d6ad1c..254c84739046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -51,7 +51,7 @@ static void mlx5e_monitor_counters_work(struct work_struct *work)
monitor_counters_work);
mutex_lock(&priv->state_lock);
- mlx5e_update_ndo_stats(priv);
+ mlx5e_stats_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
mlx5e_monitor_counter_arm(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 5de1cb9f5330..96608dbb9314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -490,11 +490,8 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
int err;
int i;
- if (!MLX5_CAP_GEN(dev, pcam_reg))
- return -EOPNOTSUPP;
-
- if (!MLX5_CAP_PCAM_REG(dev, pplm))
- return -EOPNOTSUPP;
+ if (!MLX5_CAP_GEN(dev, pcam_reg) || !MLX5_CAP_PCAM_REG(dev, pplm))
+ return false;
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index c6bc9224c3b1..bc5f72ec3623 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -699,6 +699,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
err_rule:
mlx5e_mod_hdr_detach(ct_priv->esw->dev,
&esw->offloads.mod_hdr, zone_rule->mh);
+ mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(spec);
return err;
@@ -958,12 +959,22 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
return 0;
}
+void mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+
+ if (!ct_priv || !ct_attr->ct_labels_id)
+ return;
+
+ mapping_remove(ct_priv->labels_mapping, ct_attr->ct_labels_id);
+}
+
int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct mlx5_ct_attr *ct_attr,
- struct netlink_ext_ack *extack)
+mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct mlx5_ct_attr *ct_attr,
+ struct netlink_ext_ack *extack)
{
struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 3baef917a677..708c216325d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -87,12 +87,15 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
void
mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+void
+mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr);
+
int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct mlx5_ct_attr *ct_attr,
- struct netlink_ext_ack *extack);
+mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct mlx5_ct_attr *ct_attr,
+ struct netlink_ext_ack *extack);
int
mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec);
@@ -130,12 +133,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
{
}
+static inline void
+mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr) {}
+
static inline int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct mlx5_ct_attr *ct_attr,
- struct netlink_ext_ack *extack)
+mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct mlx5_ct_attr *ct_attr,
+ struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 9334c9c3e208..24336c60123a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -20,6 +20,11 @@ enum mlx5e_icosq_wqe_type {
};
/* General */
+static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
+{
+ return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
+}
+
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 145592788de5..562bc465f82b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -122,7 +122,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
u32 *len, struct xdp_buff *xdp)
{
- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
u32 act;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index bb6669d2a916..8e7b877d8a12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
{
struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
u32 cqe_bcnt32 = cqe_bcnt;
- bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -51,10 +50,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
- rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
- rcu_read_unlock();
-
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
* The page is owned by the userspace from now.
@@ -70,7 +65,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
* allocated first from the Reuse Ring, so it has enough space.
*/
- if (likely(consumed)) {
+ if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -88,7 +83,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
u32 cqe_bcnt)
{
struct xdp_buff *xdp = wi->di->xsk;
- bool consumed;
/* wi->offset is not used in this function, because xdp->data and the
* DMA address point directly to the necessary place. Furthermore, the
@@ -107,11 +101,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
return NULL;
}
- rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
- rcu_read_unlock();
-
- if (likely(consumed))
+ if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 662a1dafeaad..4e574ac73019 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -106,8 +106,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- napi_synchronize(&c->napi);
- synchronize_rcu(); /* Sync with the XSK wakeup. */
+ synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index acf6d80a6bb7..6bbfcf18107d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -234,7 +234,7 @@ mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
/* Re-sync */
/* Runs in work context */
-static struct mlx5_wqe_ctrl_seg *
+static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
struct mlx5e_ktls_offload_context_rx *priv_rx)
{
@@ -258,15 +258,19 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
err = -ENOMEM;
- goto err_out;
+ goto err_free;
}
buf->priv_rx = priv_rx;
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+
+ spin_lock(&sq->channel->async_icosq_lock);
+
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ spin_unlock(&sq->channel->async_icosq_lock);
err = -ENOSPC;
- goto err_out;
+ goto err_dma_unmap;
}
pi = mlx5e_icosq_get_next_pi(sq, 1);
@@ -294,12 +298,18 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
};
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ spin_unlock(&sq->channel->async_icosq_lock);
- return cseg;
+ return 0;
+err_dma_unmap:
+ dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+err_free:
+ kfree(buf);
err_out:
priv_rx->stats->tls_resync_req_skip++;
- return ERR_PTR(err);
+ return err;
}
/* Function is called with elevated refcount.
@@ -309,10 +319,8 @@ static void resync_handle_work(struct work_struct *work)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
- struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_channel *c;
struct mlx5e_icosq *sq;
- struct mlx5_wq_cyc *wq;
resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
@@ -324,18 +332,9 @@ static void resync_handle_work(struct work_struct *work)
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- wq = &sq->wq;
-
- spin_lock(&c->async_icosq_lock);
- cseg = resync_post_get_progress_params(sq, priv_rx);
- if (IS_ERR(cseg)) {
+ if (resync_post_get_progress_params(sq, priv_rx))
refcount_dec(&resync->refcnt);
- goto unlock;
- }
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
-unlock:
- spin_unlock(&c->async_icosq_lock);
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -386,16 +385,17 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
u8 tracker_state, auth_state, *ctx;
+ struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
resync = &priv_rx->resync;
-
+ dev = resync->priv->mdev->device;
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
- dma_sync_single_for_cpu(resync->priv->mdev->device, buf->dma_addr,
- PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
+ DMA_FROM_DEVICE);
ctx = buf->progress.ctx;
tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
@@ -411,6 +411,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
priv_rx->stats->tls_resync_req_end++;
out:
refcount_dec(&resync->refcnt);
+ dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
kfree(buf);
}
@@ -659,7 +660,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
- napi_synchronize(&priv->channels.c[priv_rx->rxq]->napi);
+ synchronize_rcu(); /* Sync with NAPI */
if (!cancel_work_sync(&priv_rx->rule.work))
/* completion is needed, as the priv_rx in the add flow
* is maintained on the wqe info (wi), not on the socket.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
index 01468ec27446..b949b9a7538b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -35,7 +35,6 @@
#include <net/sock.h>
#include "en.h"
-#include "accel/tls.h"
#include "fpga/sdk.h"
#include "en_accel/tls.h"
@@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
+static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+{
+ return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+}
+
int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
- if (!priv->tls)
+ if (!is_tls_atomic_stats(priv))
return 0;
return NUM_TLS_SW_COUNTERS;
@@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
unsigned int i, idx = 0;
- if (!priv->tls)
+ if (!is_tls_atomic_stats(priv))
return 0;
for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
@@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
int i, idx = 0;
- if (!priv->tls)
+ if (!is_tls_atomic_stats(priv))
return 0;
for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b057a6c3a6d5..4d30a81cbadc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -158,16 +158,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
- if (mlx5e_nic_stats_grps[i]->update_stats_mask &
- MLX5E_NDO_UPDATE_STATS)
- mlx5e_nic_stats_grps[i]->update_stats(priv);
-}
-
static void mlx5e_update_stats_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
@@ -399,7 +389,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
- rq->xdp_prog = params->xdp_prog;
+ RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
rq_xdp_ix = rq->ix;
if (xsk)
@@ -408,7 +398,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (err < 0)
goto err_rq_wq_destroy;
- rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
@@ -564,8 +554,8 @@ err_free:
}
err_rq_wq_destroy:
- if (rq->xdp_prog)
- bpf_prog_put(rq->xdp_prog);
+ if (params->xdp_prog)
+ bpf_prog_put(params->xdp_prog);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
@@ -575,10 +565,16 @@ err_rq_wq_destroy:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
+ struct mlx5e_channel *c = rq->channel;
+ struct bpf_prog *old_prog = NULL;
int i;
- if (rq->xdp_prog)
- bpf_prog_put(rq->xdp_prog);
+ /* drop_rq has neither channel nor xdp_prog. */
+ if (c)
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&c->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -874,7 +870,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+ synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}
void mlx5e_close_rq(struct mlx5e_rq *rq)
@@ -1319,12 +1315,10 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
- struct mlx5e_channel *c = sq->channel;
struct mlx5_wq_cyc *wq = &sq->wq;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- /* prevent netif_tx_wake_queue */
- napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
mlx5e_tx_disable_queue(sq->txq);
@@ -1399,10 +1393,8 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
- struct mlx5e_channel *c = icosq->channel;
-
clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
- napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with NAPI. */
}
void mlx5e_close_icosq(struct mlx5e_icosq *sq)
@@ -1481,7 +1473,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
struct mlx5e_channel *c = sq->channel;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with NAPI. */
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_xdpsq_descs(sq);
@@ -3577,6 +3569,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->rx_packets += rq_stats->packets + xskrq_stats->packets;
s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
+ s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -3592,7 +3585,6 @@ void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
/* In switchdev mode, monitor counters doesn't monitor
@@ -3627,12 +3619,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
-
- /* vport multicast also counts packets that are dropped due to steering
- * or rx out of buffer
- */
- stats->multicast =
- VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -4341,6 +4327,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return 0;
}
+static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
+ lockdep_is_held(&rq->channel->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+}
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4399,29 +4395,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
*/
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
-
- clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
- if (xsk_open)
- clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
- napi_synchronize(&c->napi);
- /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
-
- old_prog = xchg(&c->rq.xdp_prog, prog);
- if (old_prog)
- bpf_prog_put(old_prog);
-
- if (xsk_open) {
- old_prog = xchg(&c->xskrq.xdp_prog, prog);
- if (old_prog)
- bpf_prog_put(old_prog);
- }
- set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
- if (xsk_open)
- set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
- /* napi_schedule in case we have missed anything */
- napi_schedule(&c->napi);
+ mlx5e_rq_replace_xdp_prog(&c->rq, prog);
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
}
unlock:
@@ -5211,7 +5188,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
.update_rx = mlx5e_update_nic_rx,
- .update_stats = mlx5e_update_ndo_stats,
+ .update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 110addbb8992..97ba2da56cf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1180,7 +1180,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_update_ndo_stats,
+ .update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
@@ -1198,7 +1198,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_update_ndo_stats,
+ .update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index c9c82b14060a..310533fc9950 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,6 +52,7 @@
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
+#include "en/txrx.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -1088,6 +1089,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_enable_ecn(rq, skb);
skb->protocol = eth_type_trans(skb, netdev);
+
+ if (unlikely(mlx5e_skb_is_multicast(skb)))
+ stats->mcast_packets++;
}
static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
@@ -1140,7 +1144,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
- bool consumed;
u32 frag_size;
va = page_address(di->page) + wi->offset;
@@ -1152,11 +1155,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- rcu_read_lock();
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
- rcu_read_unlock();
- if (consumed)
+ if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
return NULL; /* page/packet was consumed by XDP */
rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1446,7 +1446,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
- bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1463,11 +1462,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- rcu_read_lock();
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
- consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
- rcu_read_unlock();
- if (consumed) {
+ if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 6d5e54b964c0..0d34befc5761 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -54,6 +54,18 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
return total;
}
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = num_stats_grps - 1; i >= 0; i--)
+ if (stats_grps[i]->update_stats &&
+ stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
+ stats_grps[i]->update_stats(priv);
+}
+
void mlx5e_stats_update(struct mlx5e_priv *priv)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
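
The new mlx5e_stats_update_ndo_stats() above walks the profile's stats groups in reverse and runs only the ones whose update_stats_mask carries MLX5E_NDO_UPDATE_STATS. Below is a minimal standalone sketch of that dispatch-table-with-mask idea; the group names, the NDO_UPDATE flag and the update functions are made up for illustration and are not the driver's.

#include <stdio.h>

#define NDO_UPDATE (1U << 0)

struct stats_grp {
	const char *name;
	unsigned int update_mask;
	void (*update)(void);
};

static void update_sw(void)    { puts("update sw stats"); }
static void update_pport(void) { puts("update pport stats"); }

static struct stats_grp grps[] = {
	{ "sw",    NDO_UPDATE, update_sw },
	{ "pport", 0,          update_pport },
};

int main(void)
{
	/* Walk the table in reverse and skip groups whose mask does not
	 * include the NDO update event; only "sw" runs here. */
	for (int i = (int)(sizeof(grps) / sizeof(grps[0])) - 1; i >= 0; i--)
		if (grps[i].update && (grps[i].update_mask & NDO_UPDATE))
			grps[i].update();
	return 0;
}
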
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 9d9ee269a041..771f30cb0700 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -103,6 +103,7 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
@@ -122,6 +123,7 @@ struct mlx5e_sw_stats {
u64 tx_nop;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
u64 rx_csum_unnecessary;
@@ -301,6 +303,7 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
u64 xdp_drop;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 817c503693fc..28053c3c4380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1290,11 +1290,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_put_flow_tunnel_id(flow);
- if (flow_flag_test(flow, NOT_READY)) {
+ if (flow_flag_test(flow, NOT_READY))
remove_unready_flow(flow);
- kvfree(attr->parse_attr);
- return;
- }
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
@@ -1315,6 +1312,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
kvfree(attr->parse_attr);
+ mlx5_tc_ct_match_del(priv, &flow->esw_attr->ct_attr);
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -2626,6 +2625,22 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
+static unsigned long mask_to_le(unsigned long mask, int size)
+{
+ __be32 mask_be32;
+ __be16 mask_be16;
+
+ if (size == 32) {
+ mask_be32 = (__force __be32)(mask);
+ mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+ } else if (size == 16) {
+ mask_be32 = (__force __be32)(mask);
+ mask_be16 = *(__be16 *)&mask_be32;
+ mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+ }
+
+ return mask;
+}
static int offload_pedit_fields(struct mlx5e_priv *priv,
int namespace,
struct pedit_headers_action *hdrs,
@@ -2639,9 +2654,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5_fields *f;
- unsigned long mask;
- __be32 mask_be32;
- __be16 mask_be16;
+ unsigned long mask, field_mask;
int err;
u8 cmd;
@@ -2707,14 +2720,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
if (skip)
continue;
- if (f->field_bsize == 32) {
- mask_be32 = (__force __be32)(mask);
- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (f->field_bsize == 16) {
- mask_be32 = (__force __be32)(mask);
- mask_be16 = *(__be16 *)&mask_be32;
- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
- }
+ mask = mask_to_le(mask, f->field_bsize);
first = find_first_bit(&mask, f->field_bsize);
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
@@ -2745,9 +2751,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
if (cmd == MLX5_ACTION_TYPE_SET) {
int start;
+ field_mask = mask_to_le(f->field_mask, f->field_bsize);
+
/* if the field is bit-sized it may not start at the first bit */
- start = find_first_bit((unsigned long *)&f->field_mask,
- f->field_bsize);
+ start = find_first_bit(&field_mask, f->field_bsize);
MLX5_SET(set_action_in, action, offset, first - start);
/* length is num of bits to be written, zero means length of 32 */
@@ -4413,8 +4420,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
/* actions validation depends on parsing the ct matches first */
- err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f,
- &flow->esw_attr->ct_attr, extack);
+ err = mlx5_tc_ct_match_add(priv, &parse_attr->spec, f,
+ &flow->esw_attr->ct_attr, extack);
if (err)
goto err_free;
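
The mask_to_le() helper added in the en_tc.c hunk converts a pedit mask that arrives in big-endian byte order into the little-endian layout the bit-search helpers expect, for 32- and 16-bit fields. The fragment below is a rough userspace approximation using glibc's <endian.h> conversions; it mirrors the idea rather than the exact 16-bit pointer-punning the driver does, so treat it as an illustrative assumption, not the driver's code.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long mask_to_le_demo(unsigned long mask, int size)
{
	if (size == 32) {
		uint32_t be = (uint32_t)mask;

		/* Reinterpret the big-endian mask as a little-endian value. */
		mask = htole32(be32toh(be));
	} else if (size == 16) {
		uint16_t be = (uint16_t)mask;

		mask = htole16(be16toh(be));
	}
	return mask;
}

int main(void)
{
	/* A big-endian 0x0000ff00 mask becomes 0x00ff0000 once converted. */
	printf("0x%08lx\n", mask_to_le_demo(0x0000ff00UL, 32));
	return 0;
}
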
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index de10b06bade5..d5868670f8a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -121,13 +121,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_xdpsq *xsksq = &c->xsksq;
struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
- bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
bool aff_change = false;
bool busy_xsk = false;
bool busy = false;
int work_done = 0;
+ bool xsk_open;
int i;
+ rcu_read_lock();
+
+ xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+
ch_stats->poll++;
for (i = 0; i < c->num_tc; i++)
@@ -167,8 +171,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
busy |= busy_xsk;
if (busy) {
- if (likely(mlx5e_channel_no_affinity_change(c)))
- return budget;
+ if (likely(mlx5e_channel_no_affinity_change(c))) {
+ work_done = budget;
+ goto out;
+ }
ch_stats->aff_change++;
aff_change = true;
if (budget && work_done == budget)
@@ -176,7 +182,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
}
if (unlikely(!napi_complete_done(napi, work_done)))
- return work_done;
+ goto out;
ch_stats->arm++;
@@ -203,6 +209,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
ch_stats->force_irq++;
}
+out:
+ rcu_read_unlock();
+
return work_done;
}
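
The en_txrx.c change wraps the whole NAPI poll in rcu_read_lock()/rcu_read_unlock() and reroutes the early returns through a single out: label so the unlock can never be skipped. The fragment below is only a generic sketch of that single-exit idiom, with a pthread mutex standing in for the RCU read-side critical section and invented poll_once()/busy parameters.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int poll_once(int budget, int busy)
{
	int work_done = 0;

	pthread_mutex_lock(&lock);

	if (busy) {
		work_done = budget;	/* ask to be polled again */
		goto out;
	}

	work_done = budget / 2;		/* pretend the ring was drained */
out:
	/* Single exit: the unlock runs on every path. */
	pthread_mutex_unlock(&lock);
	return work_done;
}

int main(void)
{
	printf("busy: %d, idle: %d\n", poll_once(64, 1), poll_once(64, 0));
	return 0;
}
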
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b23d20e16495..00bcf97cecbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1219,35 +1219,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
- /* create peer esw miss group */
- memset(flow_group_in, 0, inlen);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ /* create peer esw miss group */
+ memset(flow_group_in, 0, inlen);
- esw_set_flow_group_source_port(esw, flow_group_in);
+ esw_set_flow_group_source_port(esw, flow_group_in);
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in,
- match_criteria);
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + esw->total_vports - 1);
- ix += esw->total_vports;
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ix + esw->total_vports - 1);
+ ix += esw->total_vports;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
- goto peer_miss_err;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto peer_miss_err;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
}
- esw->fdb_table.offloads.peer_miss_grp = g;
/* create miss group */
memset(flow_group_in, 0, inlen);
@@ -1281,7 +1283,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
miss_rule_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
@@ -1305,7 +1308,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_esw_chains_destroy(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9ccec5f8b92a..75fa44eee434 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -654,7 +654,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
fte->action = *flow_act;
fte->flow_context = spec->flow_context;
- tree_init_node(&fte->node, NULL, del_sw_fte);
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
return fte;
}
@@ -1792,7 +1792,6 @@ skip_search:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
- tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1891,7 +1890,6 @@ search_again_locked:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
- tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;
@@ -2001,7 +1999,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
up_write_ref_node(&fte->node, false);
} else {
del_hw_fte(&fte->node);
- up_write(&fte->node.lock);
+ /* Avoid double call to del_hw_fte */
+ fte->node.del_hw_func = NULL;
+ up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
}
kfree(handle);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 5abb7d2b0a9e..8518e1d60da4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -421,10 +421,15 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ spin_lock(&ocelot_port->ts_id_lock);
+
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
/* Store timestamp ID in cb[0] of sk_buff */
- skb->cb[0] = ocelot_port->ts_id % 4;
+ skb->cb[0] = ocelot_port->ts_id;
+ ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
skb_queue_tail(&ocelot_port->tx_skbs, skb);
+
+ spin_unlock(&ocelot_port->ts_id_lock);
return 0;
}
return -ENODATA;
@@ -1300,6 +1305,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
+ spin_lock_init(&ocelot_port->ts_id_lock);
/* Basic L2 initialization */
@@ -1544,18 +1550,18 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- struct ocelot_port *port;
- int i;
-
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
-
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- port = ocelot->ports[i];
- skb_queue_purge(&port->tx_skbs);
- }
}
EXPORT_SYMBOL(ocelot_deinit);
+void ocelot_deinit_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ skb_queue_purge(&ocelot_port->tx_skbs);
+}
+EXPORT_SYMBOL(ocelot_deinit_port);
+
MODULE_LICENSE("Dual MIT/GPL");
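
The ocelot.c hunk above moves the timestamp-ID bookkeeping under the new ts_id_lock and advances the counter modulo 4 in the same critical section in which the skb is tagged and queued, so concurrent transmits cannot reuse an ID. As a rough userspace analogue (not the driver's code), the allocator below hands out IDs 0..3 under a pthread mutex; alloc_ts_id() is an invented name.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ts_id_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int ts_id;

static unsigned int alloc_ts_id(void)
{
	unsigned int id;

	pthread_mutex_lock(&ts_id_lock);
	/* Hand out the current ID and advance the counter atomically
	 * with respect to other callers. */
	id = ts_id;
	ts_id = (ts_id + 1) % 4;
	pthread_mutex_unlock(&ts_id_lock);

	return id;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("ts_id %u\n", alloc_ts_id());
	return 0;
}
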
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 0668d23cdbfa..8490e42e9e2d 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -330,6 +330,7 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
u8 grp = 0; /* Send everything on CPU group 0 */
unsigned int i, count, last;
int port = priv->chip_port;
+ bool do_tstamp;
val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
@@ -344,10 +345,12 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
+ do_tstamp = (ocelot_port_add_txtstamp_skb(ocelot_port, skb) == 0);
+
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
info.rew_op = ocelot_port->ptp_cmd;
if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= (ocelot_port->ts_id % 4) << 3;
+ info.rew_op |= skb->cb[0] << 3;
}
ocelot_gen_ifh(ifh, &info);
@@ -380,12 +383,9 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) {
- ocelot_port->ts_id++;
- return NETDEV_TX_OK;
- }
+ if (!do_tstamp)
+ dev_kfree_skb_any(skb);
- dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 65408bc994c4..dfb1535f26f2 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -806,17 +806,17 @@ static const struct vcap_field vsc7514_vcap_is2_keys[] = {
[VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = {124, 1},
- [VCAP_IS2_HK_L4_SPORT] = {125, 16},
- [VCAP_IS2_HK_L4_DPORT] = {141, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {125, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {141, 16},
[VCAP_IS2_HK_L4_RNG] = {157, 8},
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
- [VCAP_IS2_HK_L4_URG] = {167, 1},
- [VCAP_IS2_HK_L4_ACK] = {168, 1},
- [VCAP_IS2_HK_L4_PSH] = {169, 1},
- [VCAP_IS2_HK_L4_RST] = {170, 1},
- [VCAP_IS2_HK_L4_SYN] = {171, 1},
- [VCAP_IS2_HK_L4_FIN] = {172, 1},
+ [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_SYN] = {168, 1},
+ [VCAP_IS2_HK_L4_RST] = {169, 1},
+ [VCAP_IS2_HK_L4_PSH] = {170, 1},
+ [VCAP_IS2_HK_L4_ACK] = {171, 1},
+ [VCAP_IS2_HK_L4_URG] = {172, 1},
[VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
[VCAP_IS2_HK_L4_1588_VER] = {181, 4},
/* IP4_OTHER (TYPE=101) */
@@ -896,11 +896,137 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
+static void mscc_ocelot_release_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+
+ ocelot_port = ocelot->ports[port];
+ if (!ocelot_port)
+ continue;
+
+ ocelot_deinit_port(ocelot, port);
+
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ unregister_netdev(priv->dev);
+ free_netdev(priv->dev);
+ }
+}
+
+static int mscc_ocelot_init_ports(struct platform_device *pdev,
+ struct device_node *ports)
+{
+ struct ocelot *ocelot = platform_get_drvdata(pdev);
+ struct device_node *portnp;
+ int err;
+
+ ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ /* No NPI port */
+ ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_NONE);
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+ struct phy_device *phy;
+ struct regmap *target;
+ struct resource *res;
+ struct phy *serdes;
+ char res_name[8];
+ u32 port;
+
+ if (of_property_read_u32(portnp, "reg", &port))
+ continue;
+
+ snprintf(res_name, sizeof(res_name), "port%d", port);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name);
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target))
+ continue;
+
+ phy_node = of_parse_phandle(portnp, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
+ phy = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!phy)
+ continue;
+
+ err = ocelot_probe_port(ocelot, port, target, phy);
+ if (err) {
+ of_node_put(portnp);
+ return err;
+ }
+
+ ocelot_port = ocelot->ports[port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ of_get_phy_mode(portnp, &phy_mode);
+
+ ocelot_port->phy_mode = phy_mode;
+
+ switch (ocelot_port->phy_mode) {
+ case PHY_INTERFACE_MODE_NA:
+ continue;
+ case PHY_INTERFACE_MODE_SGMII:
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* Ensure clock signals and speed are set on all
+ * QSGMII links
+ */
+ ocelot_port_writel(ocelot_port,
+ DEV_CLOCK_CFG_LINK_SPEED
+ (OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
+ break;
+ default:
+ dev_err(ocelot->dev,
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ of_node_put(portnp);
+ return -EINVAL;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(ocelot->dev, "deferring probe\n");
+ else
+ dev_err(ocelot->dev,
+ "missing SerDes phys for port%d\n",
+ port);
+
+ of_node_put(portnp);
+ return err;
+ }
+
+ priv->serdes = serdes;
+ }
+
+ return 0;
+}
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *ports, *portnp;
int err, irq_xtr, irq_ptp_rdy;
+ struct device_node *ports;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
@@ -985,20 +1111,24 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
- dev_err(&pdev->dev, "no ethernet-ports child node found\n");
+ dev_err(ocelot->dev, "no ethernet-ports child node found\n");
return -ENODEV;
}
ocelot->num_phys_ports = of_get_child_count(ports);
- ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
- sizeof(struct ocelot_port *), GFP_KERNEL);
-
ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
ocelot->vcap = vsc7514_vcap_props;
- ocelot_init(ocelot);
+ err = ocelot_init(ocelot);
+ if (err)
+ goto out_put_ports;
+
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_put_ports;
+
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
@@ -1008,96 +1138,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
}
- /* No NPI port */
- ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_NONE);
-
- for_each_available_child_of_node(ports, portnp) {
- struct ocelot_port_private *priv;
- struct ocelot_port *ocelot_port;
- struct device_node *phy_node;
- phy_interface_t phy_mode;
- struct phy_device *phy;
- struct regmap *target;
- struct resource *res;
- struct phy *serdes;
- char res_name[8];
- u32 port;
-
- if (of_property_read_u32(portnp, "reg", &port))
- continue;
-
- snprintf(res_name, sizeof(res_name), "port%d", port);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res_name);
- target = ocelot_regmap_init(ocelot, res);
- if (IS_ERR(target))
- continue;
-
- phy_node = of_parse_phandle(portnp, "phy-handle", 0);
- if (!phy_node)
- continue;
-
- phy = of_phy_find_device(phy_node);
- of_node_put(phy_node);
- if (!phy)
- continue;
-
- err = ocelot_probe_port(ocelot, port, target, phy);
- if (err) {
- of_node_put(portnp);
- goto out_put_ports;
- }
-
- ocelot_port = ocelot->ports[port];
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
-
- of_get_phy_mode(portnp, &phy_mode);
-
- ocelot_port->phy_mode = phy_mode;
-
- switch (ocelot_port->phy_mode) {
- case PHY_INTERFACE_MODE_NA:
- continue;
- case PHY_INTERFACE_MODE_SGMII:
- break;
- case PHY_INTERFACE_MODE_QSGMII:
- /* Ensure clock signals and speed is set on all
- * QSGMII links
- */
- ocelot_port_writel(ocelot_port,
- DEV_CLOCK_CFG_LINK_SPEED
- (OCELOT_SPEED_1000),
- DEV_CLOCK_CFG);
- break;
- default:
- dev_err(ocelot->dev,
- "invalid phy mode for port%d, (Q)SGMII only\n",
- port);
- of_node_put(portnp);
- err = -EINVAL;
- goto out_put_ports;
- }
-
- serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = PTR_ERR(serdes);
- if (err == -EPROBE_DEFER)
- dev_dbg(ocelot->dev, "deferring probe\n");
- else
- dev_err(ocelot->dev,
- "missing SerDes phys for port%d\n",
- port);
-
- of_node_put(portnp);
- goto out_put_ports;
- }
-
- priv->serdes = serdes;
- }
-
register_netdevice_notifier(&ocelot_netdevice_nb);
register_switchdev_notifier(&ocelot_switchdev_nb);
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
@@ -1114,6 +1154,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
+ mscc_ocelot_release_ports(ocelot);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6eb9fb9a1814..9c9ae33d84ce 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -829,8 +829,8 @@ nfp_port_get_fecparam(struct net_device *netdev,
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
- param->active_fec = ETHTOOL_FEC_NONE_BIT;
- param->fec = ETHTOOL_FEC_NONE_BIT;
+ param->active_fec = ETHTOOL_FEC_NONE;
+ param->fec = ETHTOOL_FEC_NONE;
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index f7f08e6a3acf..d2f5855b2ea7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4254,7 +4254,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_LL2_NON_UNICAST) |
- BIT(QED_MF_INTER_PF_SWITCH);
+ BIT(QED_MF_INTER_PF_SWITCH) |
+ BIT(QED_MF_DISABLE_ARFS);
break;
case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
@@ -4267,6 +4268,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
cdev->mf_bits);
+
+ /* In CMT the PF is unknown when the GFS block processes the
+ * packet. Therefore the searcher, which keeps a per-PF database,
+ * cannot be used, and ARFS must be disabled.
+ */
+ if (QED_IS_CMT(cdev))
+ cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
}
DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 4c6ac8862744..07824bf9d68d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1980,6 +1980,9 @@ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params)
{
+ if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
+ return;
+
if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f2ab2b086946..5bd58c65e163 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -445,6 +445,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
&cdev->mf_bits);
+ if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
+ dev_info->b_arfs_capable = true;
dev_info->tx_switching = true;
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f1f75b6d0421..b8dc5c4591ef 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -71,6 +71,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
p_ramrod->personality = PERSONALITY_ETH;
break;
case QED_PCI_ETH_ROCE:
+ case QED_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index f961f65d9372..c59b72c90293 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -311,6 +311,9 @@ int qede_alloc_arfs(struct qede_dev *edev)
{
int i;
+ if (!edev->dev_info.common.b_arfs_capable)
+ return -EINVAL;
+
edev->arfs = vzalloc(sizeof(*edev->arfs));
if (!edev->arfs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 20d2296beb79..05e3a3b60269 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -804,7 +804,7 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ if (edev->dev_info.common.b_arfs_capable)
hw_features |= NETIF_F_NTUPLE;
if (edev->dev_info.common.vxlan_enable ||
@@ -2295,7 +2295,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ if (edev->dev_info.common.b_arfs_capable) {
qede_poll_for_freeing_arfs_filters(edev);
qede_free_arfs(edev);
}
@@ -2362,10 +2362,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
- rc = qede_alloc_arfs(edev);
- if (rc)
- DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ if (qede_alloc_arfs(edev)) {
+ edev->ndev->features &= ~NETIF_F_NTUPLE;
+ edev->dev_info.common.b_arfs_capable = false;
}
qede_napi_add_enable(edev);
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index c54b7f8243f3..ffdb36715a49 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -490,6 +490,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
netif_err(efx, probe, efx->net_dev,
"Func control window overruns BAR\n");
+ rc = -EIO;
goto fail;
}
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a3528c5c823f..26073cd63e33 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -17,6 +17,7 @@
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
@@ -2069,9 +2070,61 @@ static int cpsw_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused cpsw_suspend(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *ndev = cpsw->slaves[i].ndev;
+
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ cpsw_ndo_stop(ndev);
+ }
+
+ rtnl_unlock();
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused cpsw_resume(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *ndev = cpsw->slaves[i].ndev;
+
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ cpsw_ndo_open(ndev);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
+
static struct platform_driver cpsw_driver = {
.driver = {
.name = "cpsw-switch",
+ .pm = &cpsw_pm_ops,
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,