Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/agere/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 6
-rw-r--r--  drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 10
-rw-r--r--  drivers/net/ethernet/freescale/fman/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 9
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
-rw-r--r--  drivers/net/ethernet/nxp/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/rocker/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 51
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 3
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 9
34 files changed, 235 insertions(+), 111 deletions(-)
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
index d92516ae59cc..9cd750184947 100644
--- a/drivers/net/ethernet/agere/Kconfig
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -21,6 +21,7 @@ config ET131X
tristate "Agere ET-1310 Gigabit Ethernet support"
depends on PCI
select PHYLIB
+ select CRC32
help
This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 85858163bac5..e432a68ac520 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -23,6 +23,7 @@ config MACB
tristate "Cadence MACB/GEM support"
depends on HAS_DMA && COMMON_CLK
select PHYLINK
+ select CRC32
help
The Cadence MACB ethernet interface is found on many Atmel AT32 and
AT91 parts. This driver also supports the Cadence GEM (Gigabit
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 7f90b828d159..1b7e8c91b541 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -987,9 +987,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
u32 ctrl, iplen, maclen;
-#if IS_ENABLED(CONFIG_IPV6)
struct ipv6hdr *ip6;
-#endif
unsigned int ndesc;
struct tcphdr *tcp;
int len16, pktlen;
@@ -1043,17 +1041,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
cpl->len = htons(pktlen);
memcpy(buf, skb->data, pktlen);
- if (tx_info->ip_family == AF_INET) {
+ if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
-#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
-#endif
}
cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
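
The chcr_ktls hunk above trades #if IS_ENABLED(CONFIG_IPV6) preprocessor guards for a plain IS_ENABLED() test in the condition. IS_ENABLED() folds to a compile-time constant, so the IPv6 branch is still eliminated from IPv6-less builds, but the compiler now parses and type-checks it unconditionally. A minimal sketch of the idiom, with a hypothetical helper standing in for the driver code:

    /* IS_ENABLED(CONFIG_FOO) is 1 if FOO is built-in or a module,
     * else 0; the dead branch is removed by constant folding.
     */
    static u32 csum_type_for(int ip_family)
    {
        if (!IS_ENABLED(CONFIG_IPV6) || ip_family == AF_INET)
            return TX_CSUM_TCPIP;    /* IPv4 path */
        return TX_CSUM_TCPIP6;       /* only reachable with IPv6 built in */
    }
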
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c2677ec0564d..3d1e9a302148 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -33,6 +33,7 @@ config FTGMAC100
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
+ select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a1d53ddf1593..3f9175bdce77 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -25,6 +25,7 @@ config FEC
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
+ select CRC32
select PHYLIB
imply PTP_1588_CLOCK
help
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 90cd243070d7..828c177df03d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -269,6 +269,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
if (!of_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ of_node_put(node);
return -ENODEV;
}
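
The dpaa2-mac change fixes an OF node reference leak: lookups such as of_parse_phandle() return the node with its refcount raised, and every early-exit path must drop that reference. A sketch of the balanced pattern (the property name comes from the hunk; the surrounding code is illustrative):

    struct device_node *node;

    node = of_parse_phandle(dev->of_node, "pcs-handle", 0);
    if (!node)
        return -ENODEV;

    if (!of_device_is_available(node)) {
        of_node_put(node);    /* drop the reference taken by the lookup */
        return -ENODEV;
    }

    /* ... use node, then of_node_put(node) on the normal path too ... */
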
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 8ed1ebd5a183..89e558135432 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -143,8 +143,8 @@ static const struct {
{ ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
{ ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
{ ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" },
- { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+ { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
{ ENETC_PM0_ROVR, "MAC rx oversized packets" },
{ ENETC_PM0_RJBR, "MAC rx jabber packets" },
{ ENETC_PM0_RFRG, "MAC rx fragment packets" },
@@ -163,9 +163,13 @@ static const struct {
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
{ ENETC_PM0_TPKT, "MAC tx packets" },
{ ENETC_PM0_TUND, "MAC tx undersized packets" },
+ { ENETC_PM0_T64, "MAC tx 64 byte packets" },
{ ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
+ { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
+ { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
{ ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" },
+ { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
{ ENETC_PM0_TCNP, "MAC tx control packets" },
{ ENETC_PM0_TDFR, "MAC tx deferred packets" },
{ ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index d18f439f2b81..e1e950d48c92 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_R255 0x8180
#define ENETC_PM0_R511 0x8188
#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1518 0x8198
-#define ENETC_PM0_R1519X 0x81A0
+#define ENETC_PM0_R1522 0x8198
+#define ENETC_PM0_R1523X 0x81A0
#define ENETC_PM0_ROVR 0x81A8
#define ENETC_PM0_RJBR 0x81B0
#define ENETC_PM0_RFRG 0x81B8
@@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_TBCA 0x8250
#define ENETC_PM0_TPKT 0x8260
#define ENETC_PM0_TUND 0x8268
+#define ENETC_PM0_T64 0x8270
#define ENETC_PM0_T127 0x8278
+#define ENETC_PM0_T255 0x8280
+#define ENETC_PM0_T511 0x8288
#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1518 0x8298
+#define ENETC_PM0_T1522 0x8298
+#define ENETC_PM0_T1523X 0x82A0
#define ENETC_PM0_TCNP 0x82C0
#define ENETC_PM0_TDFR 0x82D0
#define ENETC_PM0_TMCOL 0x82D8
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 34150182cc35..48bf8088795d 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -4,6 +4,7 @@ config FSL_FMAN
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
select PHYLIB
+ select CRC32
default n
help
Freescale Data-Path Acceleration Architecture Frame Manager
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index a9066e6ff697..ca2ab6cf84d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -35,8 +35,6 @@
#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
-#pragma pack(1)
-
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
struct hclge_dbg_reg_common_msg reg_msg;
};
-#pragma pack()
-
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b30f00891c03..128ab6898070 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6475,13 +6475,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
/* Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
+ mac_data &= ~BIT(28);
ew32(FEXTNVM9, mac_data);
/* Enable K1 off to enable mPHY Power Gating */
mac_data = er32(FEXTNVM6);
mac_data |= BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Enable mPHY power gating for any link and speed */
mac_data = er32(FEXTNVM8);
@@ -6525,11 +6525,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
/* Disable K1 off */
mac_data = er32(FEXTNVM6);
mac_data &= ~BIT(31);
- ew32(FEXTNVM12, mac_data);
+ ew32(FEXTNVM6, mac_data);
/* Disable Ungate PGCB clock */
mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
+ mac_data |= BIT(28);
ew32(FEXTNVM9, mac_data);
/* Cancel not waking from dynamic
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9f73cd7aee09..4aca637d4a23 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1861,6 +1861,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
+ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
@@ -1883,7 +1884,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1894,7 +1896,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
@@ -1953,16 +1955,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
+ * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -2113,14 +2123,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer)
+ struct i40e_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
- if (i40e_can_reuse_rx_page(rx_buffer)) {
+ if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2347,6 +2359,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
+ int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
@@ -2389,7 +2402,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2432,7 +2445,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
}
- i40e_put_rx_buffer(rx_ring, rx_buffer);
+ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
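
The i40e change (mirrored in ice and ixgbe below) reads page_count() once, in i40e_get_rx_buffer(), before the XDP program gets a chance to call xdp_do_redirect() and hand the page to another CPU. The reuse decision then compares that snapshot against pagecnt_bias instead of re-reading a refcount that may be changing underneath it. A condensed sketch of the small-page (PAGE_SIZE < 8192) case, assuming the usual pagecnt_bias bookkeeping:

    /* pgcnt_snapshot = page_count(page), taken before running XDP;
     * re-reading it here could race with a redirect target on another
     * CPU and wrongly report shared ownership.
     */
    static bool can_reuse_rx_page(struct page *page, int pgcnt_snapshot,
                                  unsigned int pagecnt_bias)
    {
        /* never recycle remote-node or pfmemalloc pages */
        if (page_to_nid(page) != numa_mem_id() ||
            page_is_pfmemalloc(page))
            return false;

        /* sole owner: all but one reference are our own bias */
        return (pgcnt_snapshot - pagecnt_bias) <= 1;
    }

On PAGE_SIZE >= 8192 builds the drivers pass 0 for the snapshot, since that branch decides on the page offset rather than the refcount.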
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 77d5eae6b4c2..a2d0aad8cfdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size, int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buf->page);
+#else
+ 0;
+#endif
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
@@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf)) {
+ if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
+ int rx_buf_pgcnt;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- ice_put_rx_buf(rx_ring, NULL);
+ ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
@@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
@@ -1187,7 +1200,7 @@ construct_skb:
break;
}
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 0286d2fceee4..aaa954aae574 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -138,6 +138,8 @@ struct vf_mac_filter {
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
@@ -247,6 +249,9 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a4ef4934fcf..03f78fdb0dcd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
{
- int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
struct igb_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *prog = bpf->prog, *old_prog;
bool running = netif_running(dev);
- struct bpf_prog *old_prog;
bool need_reset;
/* verify igb ring attributes are sufficient for XDP */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- if (frame_size > igb_rx_bufsz(ring))
+ if (frame_size > igb_rx_bufsz(ring)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "The RX buffer size is too small for the frame size");
+ netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
+ igb_rx_bufsz(ring), frame_size);
return -EINVAL;
+ }
}
old_prog = xchg(&adapter->xdp_prog, prog);
@@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return igb_xdp_setup(dev, xdp->prog);
+ return igb_xdp_setup(dev, xdp);
default:
return -EINVAL;
}
@@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
*/
tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
- return -ENXIO;
+ return IGB_XDP_CONSUMED;
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ nq->trans_start = jiffies;
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
+ adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
@@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
if (adapter->xdp_prog) {
int i;
@@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_ring *ring = adapter->rx_ring[i];
if (max_frame > igb_rx_bufsz(ring)) {
- netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ netdev_warn(adapter->netdev,
+ "Requested MTU size is not supported with XDP. Max frame size is %d\n",
+ max_frame);
return -EINVAL;
}
}
@@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
@@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
@@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_ring->skb = skb;
if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
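
Passing the whole struct netdev_bpf into igb_xdp_setup() gives the function access to bpf->extack, so validation failures reach the netlink caller (e.g. ip link set ... xdp) instead of only the kernel log. A sketch of the reporting pattern, with illustrative sizes:

    static int xdp_setup_sketch(struct net_device *dev, struct netdev_bpf *bpf)
    {
        int frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
        int buf_size = 2048;    /* stand-in for igb_rx_bufsz(ring) */

        if (frame_size > buf_size) {
            /* the extack string is delivered in the failing netlink reply */
            NL_SET_ERR_MSG_MOD(bpf->extack,
                               "The RX buffer size is too small for the frame size");
            return -EINVAL;
        }
        return 0;
    }

The nq->trans_start = jiffies updates in the same file serve a different purpose: XDP transmits on queues shared with the stack, so refreshing trans_start under the queue lock keeps the netdev watchdog from declaring a bogus TX timeout.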
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 50e6b8b6ba7b..393d1c2cd853 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+ page_count(rx_buffer->page);
+#else
+ 0;
+#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
@@ -2055,9 +2063,10 @@ skip_sync:
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
{
- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
+ int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
}
- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 13bd58dafc6c..25dd903a3e92 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -313,8 +313,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
- if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX)
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
goto err_port_init;
+ }
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 157f7eef92f1..32aad4d32b88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
- en_dbg(DRV, priv, "Scheduling watchdog\n");
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+ en_dbg(DRV, priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+ }
}
@@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
+ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
@@ -1829,6 +1832,7 @@ int mlx4_en_start_port(struct net_device *dev)
local_bh_enable();
}
+ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- watchdog_task);
+ restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
@@ -2376,7 +2380,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
- /* NIC is probably restarting - let watchdog task reset
+ /* NIC is probably restarting - let restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
@@ -2385,7 +2389,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+ &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
@@ -2791,7 +2797,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
- queue_work(mdev->workqueue, &priv->watchdog_task);
+ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ queue_work(mdev->workqueue, &priv->restart_task);
}
}
@@ -3164,7 +3171,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index b15ec32758a3..31b74bddb7cd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+ u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_tx_desc *tx_desc;
+ u16 wqe_index;
+ int desc_size;
+
+ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+ false);
+
+ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+ tx_info = &ring->tx_info[wqe_index];
+ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+ wqe_index, desc_size);
+ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ return;
+
+ en_err(priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+}
+
int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
@@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
- MLX4_CQE_OPCODE_ERROR)) {
- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
- cqe_err->vendor_err_syndrome,
- cqe_err->syndrome);
- }
+ MLX4_CQE_OPCODE_ERROR))
+ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+ ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1c50d0f22199..17f2b1919378 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -251,6 +251,10 @@ struct mlx4_en_page_cache {
} buf[MLX4_EN_CACHE_SIZE];
};
+enum {
+ MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
struct mlx4_en_priv;
struct mlx4_en_tx_ring {
@@ -297,6 +301,7 @@ struct mlx4_en_tx_ring {
* Only queue_stopped might be used if BQL is not properly working.
*/
unsigned long queue_stopped;
+ unsigned long state;
struct mlx4_hwq_resources sp_wqres;
struct mlx4_qp sp_qp;
struct mlx4_qp_context sp_context;
@@ -510,6 +515,10 @@ struct mlx4_en_stats_bitmap {
struct mutex mutex; /* for mutual access to stats bitmap */
};
+enum {
+ MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -575,7 +584,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
- struct work_struct watchdog_task;
+ struct work_struct restart_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
@@ -620,6 +629,7 @@ struct mlx4_en_priv {
u32 pflags;
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
u8 rss_hash_fn;
+ unsigned long state;
};
enum mlx4_en_wol {
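
Across the mlx4 hunks, the restart work is now scheduled through an atomic test_and_set_bit() on MLX4_EN_STATE_FLAG_RESTARTING, so a burst of TX timeouts or error CQEs queues exactly one port restart; the bit is cleared once mlx4_en_start_port() brings the port back up. A sketch of the once-only scheduling idiom (the wrapper name is illustrative):

    static void schedule_port_restart(struct mlx4_en_priv *priv)
    {
        /* test_and_set_bit() returns the old value atomically:
         * non-zero means a restart is already pending, so don't
         * queue another.
         */
        if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
            return;

        queue_work(priv->mdev->workqueue, &priv->restart_task);
    }

The per-ring MLX4_EN_TX_RING_STATE_RECOVERING bit plays the same role at ring granularity, so each ring dumps its error CQE at most once per recovery cycle.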
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 485478979b1a..6e4d7bb7fea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -199,6 +199,7 @@ config MLX5_EN_TLS
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
depends on MLX5_CORE_EN && MLX5_ESWITCH
+ select CRC32
default y
help
Build support for software-managed steering in the NIC.
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 31f9a82dc113..d0f6dfe0dcf3 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -47,6 +47,7 @@ config LAN743X
depends on PCI
select PHYLIB
select CRC16
+ select CRC32
help
Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 2632fe2d2448..abea8dd2b0cb 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1551,10 +1551,11 @@ int ocelot_init(struct ocelot *ocelot)
SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
/* Setup flooding PGIDs */
- ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
- ANA_FLOODING_FLD_UNICAST(PGID_UC),
- ANA_FLOODING, 0);
+ for (i = 0; i < ocelot->num_flooding_pgids; i++)
+ ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST(PGID_UC),
+ ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index dc00772950e5..1e7729421a82 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -1254,6 +1254,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
ocelot->num_phys_ports = of_get_child_count(ports);
+ ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index d8b99d6a0356..b82758d5beed 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -22,6 +22,7 @@ config NFP
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
+ select CRC32
help
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b4acf2f41e84..f21fb573ea3e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
struct nfp_net_dp *dp;
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
- return -EBUSY;
-
if (!prog == !nn->dp.xdp_prog) {
WRITE_ONCE(nn->dp.xdp_prog, prog);
xdp_attachment_setup(&nn->xdp, bpf);
@@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
{
int err;
- if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
- return -EBUSY;
-
err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig
index ee83a71c2509..c84997db828c 100644
--- a/drivers/net/ethernet/nxp/Kconfig
+++ b/drivers/net/ethernet/nxp/Kconfig
@@ -3,6 +3,7 @@ config LPC_ENET
tristate "NXP ethernet MAC on LPC devices"
depends on ARCH_LPC32XX || COMPILE_TEST
select PHYLIB
+ select CRC32
help
Say Y or M here if you want to use the NXP ethernet MAC included on
some NXP LPC devices. You can safely enable this option for LPC32xx
diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig
index 99e1290e0307..2318811ff75a 100644
--- a/drivers/net/ethernet/rocker/Kconfig
+++ b/drivers/net/ethernet/rocker/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_ROCKER
config ROCKER
tristate "Rocker switch driver (EXPERIMENTAL)"
depends on PCI && NET_SWITCHDEV && BRIDGE
+ select CRC32
help
This driver supports Rocker switch device.
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index efef5476a577..223f69da7e95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -246,13 +246,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
goto err_parse_dt;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(dwmac->ops->addr_width));
- if (ret) {
- dev_err(&pdev->dev, "DMA mask set failed\n");
- goto err_dma_mask;
- }
-
+ plat_dat->addr64 = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
@@ -272,7 +266,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
err_dwmac_init:
err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dma_mask:
err_parse_dt:
err_match_data:
stmmac_remove_config_dt(pdev, plat_dat);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index dc0b8b6d180d..459ae715b33d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -30,7 +30,6 @@
#define PRG_ETH0_EXT_RMII_MODE 4
/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one
@@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
return -ENOMEM;
clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+ clk_configs->m250_mux.shift;
clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
ARRAY_SIZE(mux_parents), &clk_mux_ops,
&clk_configs->m250_mux.hw);
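
The meson8b change deletes the hand-maintained shift define and derives both the shift and the (shift-relative) mask from the single GENMASK definition, so the two values can no longer drift apart; note that struct clk_mux expects .mask to be the field width after shifting down. A small worked example:

    #define CLK_SEL_MASK    GENMASK(4, 4)   /* one-bit field at bit 4 */

    shift = __ffs(CLK_SEL_MASK);            /* lowest set bit -> 4 */
    mask  = CLK_SEL_MASK >> shift;          /* shift-relative  -> 0x1 */
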
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 6e30d7eb4983..0b4ee2dbb691 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr)
return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 10000, 100000);
+ 10000, 1000000);
}
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
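
The dwmac4 DMA-reset fix only widens the poll budget: readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) rereads the register every sleep_us until cond holds or timeout_us expires, returning -ETIMEDOUT on failure. Raising the last argument from 100000 to 1000000 keeps the 10 ms poll interval but gives the soft-reset bit up to a full second to self-clear, which some board/PHY combinations appear to need:

    u32 value;
    int ret;

    /* poll every 10 ms, give up after 1 s */
    ret = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
                             !(value & DMA_BUS_MODE_SFT_RESET),
                             10000, 1000000);
    if (ret)
        return ret;    /* -ETIMEDOUT if the reset never completed */
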
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c1ac75901ce..5b1c12ff98c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1558,6 +1558,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
}
/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
* free_dma_rx_desc_resources - free RX dma desc resources
* @priv: private structure
*/
@@ -2925,9 +2938,6 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (priv->eee_enabled)
- del_timer_sync(&priv->eee_ctrl_timer);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -2946,6 +2956,11 @@ static int stmmac_release(struct net_device *dev)
if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -4960,6 +4975,14 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "SPH feature enabled\n");
}
+ /* The current IP register MAC_HW_Feature1[ADDR64] only defines
+ * 32/40/64 bit widths, but some SoCs support other widths: the
+ * i.MX8MP, for example, supports 34 bits, which is reported as 40
+ * bits in MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64
+ * to match the real hardware design.
+ */
+ if (priv->plat->addr64)
+ priv->dma_cap.addr64 = priv->plat->addr64;
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -5172,6 +5195,11 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
@@ -5277,11 +5305,20 @@ int stmmac_resume(struct device *dev)
return ret;
}
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ rtnl_lock();
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+ rtnl_unlock();
+ }
+
rtnl_lock();
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
+ stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
@@ -5295,14 +5332,6 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_unlock();
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
- rtnl_lock();
- phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
- rtnl_unlock();
- }
-
phylink_mac_change(priv->phylink, true);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6dd73bd0f458..99f44563e10f 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
if (!priv->xdpi.prog && !prog)
return 0;
- if (!xdp_attachment_flags_ok(&priv->xdpi, bpf))
- return -EBUSY;
-
WRITE_ONCE(priv->xdp_prog, prog);
xdp_attachment_setup(&priv->xdpi, bpf);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 60c199fcb91e..030185301014 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev)
struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
- struct resource *res;
const void *addr;
__be32 *p;
bool little_endian;
@@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
} else if (pdata) {
/* 2nd memory resource specifies DMA registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!lp->sdma_regs) {
+ lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(lp->sdma_regs)) {
dev_err(&pdev->dev,
"could not map DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(lp->sdma_regs);
}
if (pdata->dma_little_endian) {
lp->dma_in = temac_dma_in32_le;
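
The ll_temac conversion collapses platform_get_resource() plus devm_ioremap() into a single devm_platform_ioremap_resource() call, which also validates the resource itself (the old code would have dereferenced a NULL res if the second MEM resource were missing). The error convention changes too: the helper returns an ERR_PTR-encoded pointer, never NULL, so callers must check with IS_ERR()/PTR_ERR(). A minimal sketch:

    void __iomem *regs;

    /* map the 2nd MEM resource; region request + ioremap, devm-managed */
    regs = devm_platform_ioremap_resource(pdev, 1);
    if (IS_ERR(regs))
        return PTR_ERR(regs);    /* propagate -EINVAL/-ENOMEM/etc. */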