Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	417
1 file changed, 303 insertions(+), 114 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4727f7be4f86..9a3182b9e767 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -39,6 +39,7 @@
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
@@ -325,7 +326,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_250_300M;
}
- if (priv->plat->has_sun8i) {
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
if (clk_rate > 160000000)
priv->clk_csr = 0x03;
else if (clk_rate > 80000000)
@@ -421,7 +422,7 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->en_tx_lpi_clockgating);
+ priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
return 0;
}
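
A recurring change in this diff: individual bool members of struct plat_stmmacenet_data (has_sun8i, en_tx_lpi_clockgating, ...) are folded into a single flags bitmask. A minimal sketch of the pattern follows; the flag names appear in this diff, but the bit positions below are assumed, not copied from include/linux/stmmac.h:

	#include <linux/bits.h>

	/* sketch only: bit positions assumed */
	#define STMMAC_FLAG_HAS_SUN8I			BIT(0)
	#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING	BIT(1)
	#define STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP	BIT(2)

	/* old: if (priv->plat->has_sun8i)   -- one bool member per quirk */
	/* new: one bitmask carries all platform quirks                   */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
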
@@ -909,6 +910,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
+ if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
+ stmmac_hwtstamp_correct_latency(priv, priv);
+
return 0;
}
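
The seemingly redundant stmmac_hwtstamp_correct_latency(priv, priv) is the hwif.h dispatch idiom: stmmac hardware ops are invoked through macros in which the first argument only selects the ops table and the remaining arguments are forwarded to the callback. A sketch of the shape; the exact macro body and ops-group name are assumptions, not copied from hwif.h:

	#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
	({ \
		int __result = -EINVAL; \
		if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
			(__priv)->hw->__module->__cname((__arg0), ##__args); \
			__result = 0; \
		} \
		__result; \
	})
	/* first "priv" picks the ops table, second is the callback's argument */
	#define stmmac_hwtstamp_correct_latency(__priv, __args...) \
		stmmac_do_void_callback(__priv, ptp, correct_latency, __args)
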
@@ -991,7 +995,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 old_ctrl, ctrl;
- if (priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup)
+ if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup)
priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -1059,7 +1064,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
priv->speed = speed;
if (priv->plat->fix_mac_speed)
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
if (!duplex)
ctrl &= ~priv->hw->link.duplex;
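
fix_mac_speed() gains a third parameter here so platform glue can tell which phylink mode (e.g. MLO_AN_FIXED vs. MLO_AN_PHY) triggered the speed change. A sketch of a callback on the new signature; the glue driver, struct, and register names are hypothetical:

	static void mydwmac_fix_mac_speed(void *bsp_priv, unsigned int speed,
					  unsigned int mode)
	{
		struct mydwmac_priv *dwmac = bsp_priv;	/* hypothetical glue */

		/* retune interface timing for the negotiated speed */
		if (speed == SPEED_1000)
			regmap_update_bits(dwmac->regmap, MYDWMAC_CTRL_REG,
					   MYDWMAC_GBIT_EN, MYDWMAC_GBIT_EN);
	}
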
@@ -1084,7 +1089,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
priv->eee_active =
- phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+ phy_init_eee(phy, !(priv->plat->flags &
+ STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -1092,6 +1098,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (priv->dma_cap.fpesel)
stmmac_fpe_link_state_handle(priv, true);
+
+ if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
+ stmmac_hwtstamp_correct_latency(priv, priv);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
@@ -1110,7 +1119,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->interface;
+ int interface = priv->plat->mac_interface;
if (priv->dma_cap.pcs) {
if ((interface == PHY_INTERFACE_MODE_RGMII) ||
@@ -1144,7 +1153,7 @@ static int stmmac_init_phy(struct net_device *dev)
if (!phylink_expects_phy(priv->phylink))
return 0;
- fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1190,22 +1199,24 @@ static int stmmac_init_phy(struct net_device *dev)
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
- struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
- struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
- int max_speed = priv->plat->max_speed;
+ struct stmmac_mdio_bus_data *mdio_bus_data;
int mode = priv->plat->phy_interface;
+ struct fwnode_handle *fwnode;
struct phylink *phylink;
+ int max_speed;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
- if (priv->plat->mdio_bus_data)
+ priv->phylink_config.mac_managed_pm = true;
+
+ mdio_bus_data = priv->plat->mdio_bus_data;
+ if (mdio_bus_data)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
- if (!fwnode)
- fwnode = dev_fwnode(priv->device);
-
- /* Set the platform/firmware specified interface mode */
+ /* Set the platform/firmware specified interface mode. Note, phylink
+ * deals with the PHY interface mode, not the MAC interface mode.
+ */
__set_bit(mode, priv->phylink_config.supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
@@ -1214,36 +1225,24 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.supported_interfaces);
priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100;
-
- if (!max_speed || max_speed >= 1000)
- priv->phylink_config.mac_capabilities |= MAC_1000;
-
- if (priv->plat->has_gmac4) {
- if (!max_speed || max_speed >= 2500)
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
- } else if (priv->plat->has_xgmac) {
- if (!max_speed || max_speed >= 2500)
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
- if (!max_speed || max_speed >= 5000)
- priv->phylink_config.mac_capabilities |= MAC_5000FD;
- if (!max_speed || max_speed >= 10000)
- priv->phylink_config.mac_capabilities |= MAC_10000FD;
- if (!max_speed || max_speed >= 25000)
- priv->phylink_config.mac_capabilities |= MAC_25000FD;
- if (!max_speed || max_speed >= 40000)
- priv->phylink_config.mac_capabilities |= MAC_40000FD;
- if (!max_speed || max_speed >= 50000)
- priv->phylink_config.mac_capabilities |= MAC_50000FD;
- if (!max_speed || max_speed >= 100000)
- priv->phylink_config.mac_capabilities |= MAC_100000FD;
- }
+ MAC_10FD | MAC_100FD |
+ MAC_1000FD;
/* Half-Duplex can only work with single queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- priv->phylink_config.mac_managed_pm = true;
+ if (priv->plat->tx_queues_to_use <= 1)
+ priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
+ MAC_1000HD;
+
+ /* Get the MAC specific capabilities */
+ stmmac_mac_phylink_get_caps(priv);
+
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
+ fwnode = priv->plat->port_node;
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
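
The capability mask is now built additively instead of subtractively: the full-duplex basics first, half-duplex only when a single TX queue is in use, core-specific higher speeds via stmmac_mac_phylink_get_caps(), and finally the firmware "max-speed" limit applied through phylink_limit_mac_speed(). A sketch of the net effect, with an assumed example limit:

	config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				  MAC_10FD | MAC_100FD | MAC_1000FD;
	if (priv->plat->tx_queues_to_use <= 1)	/* half duplex: 1 queue only */
		config.mac_capabilities |= MAC_10HD | MAC_100HD | MAC_1000HD;
	/* a GMAC4/XGMAC get_caps callback may add MAC_2500FD and above here */
	if (priv->plat->max_speed)		/* e.g. 100 from devicetree */
		phylink_limit_mac_speed(&config, priv->plat->max_speed);
	/* with max_speed == 100: pause + MAC_10{HD,FD} + MAC_100{HD,FD} remain */
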
@@ -2432,6 +2431,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct dma_desc *tx_desc = NULL;
struct xdp_desc xdp_desc;
bool work_done = true;
+ u32 tx_set_ic_bit = 0;
+ unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2492,7 +2493,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- priv->xstats.tx_set_ic_bit++;
+ tx_set_ic_bit++;
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
@@ -2504,6 +2505,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
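
This hunk and those that follow are one statistics rework: hot-path counters move out of the shared priv->xstats/dev->stats into per-queue structures guarded by u64_stats_sync, with updates batched in locals and published once per pass. The writer-side idiom as a self-contained sketch (the struct shape is assumed):

	#include <linux/u64_stats_sync.h>

	struct txq_stats {
		u64 tx_set_ic_bit;
		struct u64_stats_sync syncp;
	};

	static void txq_stats_publish(struct txq_stats *s, u32 batched)
	{
		unsigned long flags;

		/* one write section per NAPI pass instead of per descriptor */
		flags = u64_stats_update_begin_irqsave(&s->syncp);
		s->tx_set_ic_bit += batched;
		u64_stats_update_end_irqrestore(&s->syncp, flags);
	}
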
@@ -2545,11 +2549,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
+ u32 tx_packets = 0, tx_errors = 0;
+ unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
- priv->xstats.tx_clean++;
-
tx_q->xsk_frames_done = 0;
entry = tx_q->dirty_tx;
@@ -2580,8 +2584,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
else
p = tx_q->dma_tx + entry;
- status = stmmac_tx_status(priv, &priv->dev->stats,
- &priv->xstats, p, priv->ioaddr);
+ status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
/* Check if the descriptor is owned by the DMA */
if (unlikely(status & tx_dma_own))
break;
@@ -2597,13 +2600,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (likely(!(status & tx_not_ls))) {
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
- priv->dev->stats.tx_errors++;
+ tx_errors++;
if (unlikely(status & tx_err_bump_tc))
stmmac_bump_dma_threshold(priv, queue);
} else {
- priv->dev->stats.tx_packets++;
- priv->xstats.tx_pkt_n++;
- priv->xstats.txq_stats[queue].tx_pkt_n++;
+ tx_packets++;
}
if (skb)
stmmac_get_tx_hwtstamp(priv, p, skb);
@@ -2707,6 +2708,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
HRTIMER_MODE_REL);
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_packets += tx_packets;
+ tx_q->txq_stats.tx_pkt_n += tx_packets;
+ tx_q->txq_stats.tx_clean++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+
+ priv->xstats.tx_errors += tx_errors;
+
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
/* Combine decisions from TX clean and XSK TX */
@@ -2734,7 +2743,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
- priv->dev->stats.tx_errors++;
+ priv->xstats.tx_errors++;
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
@@ -3710,7 +3719,7 @@ static int stmmac_request_irq(struct net_device *dev)
int ret;
/* Request the IRQ lines */
- if (priv->plat->multi_msi_en)
+ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
ret = stmmac_request_irq_multi_msi(dev);
else
ret = stmmac_request_irq_single(dev);
@@ -3827,10 +3836,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- /* Extra statistics */
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
@@ -3838,7 +3843,8 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
- if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
if (ret < 0) {
netdev_err(priv->dev, "%s: Serdes powerup failed\n",
@@ -4110,6 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
+ unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4258,7 +4265,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
}
/* We've used all descriptors we need for this skb, however,
@@ -4274,9 +4280,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- dev->stats.tx_bytes += skb->len;
- priv->xstats.tx_tso_frames++;
- priv->xstats.tx_tso_nfrags += nfrags;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_bytes += skb->len;
+ tx_q->txq_stats.tx_tso_frames++;
+ tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ if (set_ic)
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4326,7 +4336,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
- priv->dev->stats.tx_dropped++;
+ priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -4352,6 +4362,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
+ unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4480,7 +4491,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
}
/* We've used all descriptors we need for this skb, however,
@@ -4507,7 +4517,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- dev->stats.tx_bytes += skb->len;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_bytes += skb->len;
+ if (set_ic)
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4569,7 +4583,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
dev_kfree_skb(skb);
- priv->dev->stats.tx_dropped++;
+ priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
}
@@ -4770,9 +4784,12 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
+ unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- priv->xstats.tx_set_ic_bit++;
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4917,16 +4934,18 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
+ unsigned long flags;
struct sk_buff *skb;
u32 hash;
skb = stmmac_construct_skb_zc(ch, xdp);
if (!skb) {
- priv->dev->stats.rx_dropped++;
+ priv->xstats.rx_dropped++;
return;
}
@@ -4945,8 +4964,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += len;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_pkt_n++;
+ rx_q->rxq_stats.rx_bytes += len;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5023,9 +5044,11 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
+ u32 rx_errors = 0, rx_dropped = 0;
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
+ unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5081,8 +5104,7 @@ read_again:
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = stmmac_rx_status(priv, &priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -5104,8 +5126,7 @@ read_again:
break;
if (priv->extend_desc)
- stmmac_rx_extended_status(priv, &priv->dev->stats,
- &priv->xstats,
+ stmmac_rx_extended_status(priv, &priv->xstats,
rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
xsk_buff_free(buf->xdp);
@@ -5113,7 +5134,7 @@ read_again:
dirty++;
error = 1;
if (!priv->hwts_rx_en)
- priv->dev->stats.rx_errors++;
+ rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
@@ -5161,7 +5182,7 @@ read_again:
break;
case STMMAC_XDP_CONSUMED:
xsk_buff_free(buf->xdp);
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
break;
case STMMAC_XDP_TX:
case STMMAC_XDP_REDIRECT:
@@ -5182,8 +5203,12 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- priv->xstats.rx_pkt_n += count;
- priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
if (failure || stmmac_rx_dirty(priv, queue) > 0)
@@ -5207,6 +5232,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
+ u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
@@ -5216,6 +5242,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
+ unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5271,8 +5298,7 @@ read_again:
p = rx_q->dma_rx + entry;
/* read the status of the incoming frame */
- status = stmmac_rx_status(priv, &priv->dev->stats,
- &priv->xstats, p);
+ status = stmmac_rx_status(priv, &priv->xstats, p);
/* check if managed by the DMA otherwise go ahead */
if (unlikely(status & dma_own))
break;
@@ -5289,14 +5315,13 @@ read_again:
prefetch(np);
if (priv->extend_desc)
- stmmac_rx_extended_status(priv, &priv->dev->stats,
- &priv->xstats, rx_q->dma_erx + entry);
+ stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
- priv->dev->stats.rx_errors++;
+ rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
@@ -5364,7 +5389,7 @@ read_again:
virt_to_head_page(ctx.xdp.data),
sync_len, true);
buf->page = NULL;
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
/* Clear skb as it was set as
* status by XDP program.
@@ -5393,7 +5418,7 @@ read_again:
skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
if (!skb) {
- priv->dev->stats.rx_dropped++;
+ rx_dropped++;
count++;
goto drain_data;
}
@@ -5413,7 +5438,7 @@ read_again:
priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5425,7 +5450,7 @@ read_again:
priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5453,8 +5478,8 @@ drain_data:
napi_gro_receive(&ch->rx_napi, skb);
skb = NULL;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += len;
+ rx_packets++;
+ rx_bytes += len;
count++;
}
@@ -5469,8 +5494,14 @@ drain_data:
stmmac_rx_refill(priv, queue);
- priv->xstats.rx_pkt_n += count;
- priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.rx_packets += rx_packets;
+ rx_q->rxq_stats.rx_bytes += rx_bytes;
+ rx_q->rxq_stats.rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+
+ priv->xstats.rx_dropped += rx_dropped;
+ priv->xstats.rx_errors += rx_errors;
return count;
}
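
The two page_pool_release_page() replacements above switch the RX path to full page_pool recycling: rather than releasing the page from the pool when attaching it to an skb, the skb is marked so its fragments return to the pool when it is freed. A sketch of the pattern (buffer field names assumed):

	skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page,
			buf->page_offset, buf1_len,
			priv->dma_conf.dma_buf_sz);
	skb_mark_for_recycle(skb);	/* fragments recycle to the pool on free */
	buf->page = NULL;		/* page lifetime now follows the skb */
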
@@ -5480,10 +5511,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_rx_queue *rx_q;
u32 chan = ch->index;
+ unsigned long flags;
int work_done;
- priv->xstats.napi_poll++;
+ rx_q = &priv->dma_conf.rx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5502,10 +5538,15 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
+ unsigned long flags;
int work_done;
- priv->xstats.napi_poll++;
+ tx_q = &priv->dma_conf.tx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
@@ -5527,9 +5568,20 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done, rxtx_done;
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
u32 chan = ch->index;
+ unsigned long flags;
+
+ rx_q = &priv->dma_conf.rx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
+ rx_q->rxq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
- priv->xstats.napi_poll++;
+ tx_q = &priv->dma_conf.tx_queue[chan];
+ flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
+ tx_q->txq_stats.napi_poll++;
+ u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
@@ -5677,7 +5729,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
features &= ~NETIF_F_CSUM_MASK;
/* Disable tso if asked by ethtool */
- if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
if (features & NETIF_F_TSO)
priv->tso = true;
else
@@ -5798,7 +5850,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
}
/* PCS link status */
- if (priv->hw->pcs && !priv->plat->has_integrated_pcs) {
+ if (priv->hw->pcs &&
+ !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
if (priv->xstats.pcs_link)
netif_carrier_on(priv->dev);
else
@@ -5951,7 +6004,7 @@ static void stmmac_poll_controller(struct net_device *dev)
if (test_bit(STMMAC_DOWN, &priv->state))
return;
- if (priv->plat->multi_msi_en) {
+ if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
@@ -6174,6 +6227,22 @@ DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
+ static const char * const dwxgmac_timestamp_source[] = {
+ "None",
+ "Internal",
+ "External",
+ "Both",
+ };
+ static const char * const dwxgmac_safety_feature_desc[] = {
+ "No",
+ "All Safety Features with ECC and Parity",
+ "All Safety Features without ECC or Parity",
+ "All Safety Features with Parity Only",
+ "ECC Only",
+ "UNDEFINED",
+ "UNDEFINED",
+ "UNDEFINED",
+ };
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -6192,10 +6261,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
- seq_printf(seq, "\tHash Filter: %s\n",
- (priv->dma_cap.hash_filter) ? "Y" : "N");
- seq_printf(seq, "\tMultiple MAC address registers: %s\n",
- (priv->dma_cap.multi_addr) ? "Y" : "N");
+ if (priv->plat->has_xgmac) {
+ seq_printf(seq,
+ "\tNumber of Additional MAC address registers: %d\n",
+ priv->dma_cap.multi_addr);
+ } else {
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ }
seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
@@ -6210,12 +6285,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
+ if (priv->plat->has_xgmac)
+ seq_printf(seq, "\tTimestamp System Time Source: %s\n",
+ dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
+ priv->plat->has_xgmac) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@ -6223,9 +6302,9 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.rx_coe_type1) ? "Y" : "N");
seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
(priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
}
- seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
- (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
@@ -6238,12 +6317,13 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.enh_desc) ? "Y" : "N");
seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
- seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
+ (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
priv->dma_cap.pps_out_num);
seq_printf(seq, "\tSafety Features: %s\n",
- priv->dma_cap.asp ? "Y" : "N");
+ dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
seq_printf(seq, "\tFlexible RX Parser: %s\n",
priv->dma_cap.frpsel ? "Y" : "N");
seq_printf(seq, "\tEnhanced Addressing: %d\n",
@@ -6268,6 +6348,53 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
priv->dma_cap.fpesel ? "Y" : "N");
seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
priv->dma_cap.tbssel ? "Y" : "N");
+ seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
+ priv->dma_cap.tbs_ch_num);
+ seq_printf(seq, "\tPer-Stream Filtering: %s\n",
+ priv->dma_cap.sgfsel ? "Y" : "N");
+ seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
+ BIT(priv->dma_cap.ttsfd) >> 1);
+ seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
+ priv->dma_cap.numtc);
+ seq_printf(seq, "\tDCB Feature: %s\n",
+ priv->dma_cap.dcben ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
+ priv->dma_cap.advthword ? "Y" : "N");
+ seq_printf(seq, "\tPTP Offload: %s\n",
+ priv->dma_cap.ptoen ? "Y" : "N");
+ seq_printf(seq, "\tOne-Step Timestamping: %s\n",
+ priv->dma_cap.osten ? "Y" : "N");
+ seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
+ priv->dma_cap.pfcen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
+ BIT(priv->dma_cap.frpes) << 6);
+ seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
+ BIT(priv->dma_cap.frpbs) << 6);
+ seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
+ priv->dma_cap.frppipe_num);
+ seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
+ priv->dma_cap.nrvf_num ?
+ (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
+ seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
+ priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
+ seq_printf(seq, "\tDepth of GCL: %lu\n",
+ priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
+ seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
+ priv->dma_cap.cbtisel ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
+ priv->dma_cap.aux_snapshot_n);
+ seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
+ priv->dma_cap.pou_ost_en ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced DMA: %s\n",
+ priv->dma_cap.edma ? "Y" : "N");
+ seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
+ priv->dma_cap.ediffc ? "Y" : "N");
+ seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
+ priv->dma_cap.vxn ? "Y" : "N");
+ seq_printf(seq, "\tDebug Memory Interface: %s\n",
+ priv->dma_cap.dbgmem ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
+ priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
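
Several of the new capability fields are hardware-encoded as log2 values rather than raw counts, hence the BIT()/shift arithmetic above. Worked examples with assumed encoded values:

	/* hash_tb_sz = 2:  BIT(2) << 5 =  4 * 32 = 128-entry hash table    */
	/* nrvf_num   = 3:  BIT(3) << 1 =  8 *  2 =  16 ext. VLAN filters   */
	/* frpes      = 1:  BIT(1) << 6 =  2 * 64 = 128 parser instructions */
	/* estdep     = 4:  BIT(4) << 5 = 16 * 32 = 512-deep GCL            */
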
@@ -6788,6 +6915,56 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return 0;
}
+static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int start;
+ int q;
+
+ for (q = 0; q < tx_cnt; q++) {
+ struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ tx_packets = txq_stats->tx_packets;
+ tx_bytes = txq_stats->tx_bytes;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ for (q = 0; q < rx_cnt; q++) {
+ struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ rx_packets = rxq_stats->rx_packets;
+ rx_bytes = rxq_stats->rx_bytes;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ }
+
+ stats->rx_dropped = priv->xstats.rx_dropped;
+ stats->rx_errors = priv->xstats.rx_errors;
+ stats->tx_dropped = priv->xstats.tx_dropped;
+ stats->tx_errors = priv->xstats.tx_errors;
+ stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
+ stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
+ stats->rx_length_errors = priv->xstats.rx_length;
+ stats->rx_crc_errors = priv->xstats.rx_crc_errors;
+ stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
+ stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
+}
+
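
stmmac_get_stats64() above is the matching lockless reader: on 64-bit kernels u64_stats_fetch_begin()/u64_stats_fetch_retry() compile down to plain loads, while on 32-bit the loop rereads whenever a writer raced with it. The error counters still come from priv->xstats, which is updated outside the seqcount. The reader idiom in isolation (same sketched struct as earlier):

	u64 snapshot;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		snapshot = s->tx_set_ic_bit;	/* could tear on 32-bit... */
	} while (u64_stats_fetch_retry(&s->syncp, start));
	/* ...so retry until no writer overlapped the reads */
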
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
@@ -6798,6 +6975,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_eth_ioctl = stmmac_ioctl,
+ .ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6855,7 +7033,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
int ret;
/* dwmac-sun8i only work in chain mode */
- if (priv->plat->has_sun8i)
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
chain_mode = 1;
priv->chain_mode = chain_mode;
@@ -6876,7 +7054,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
- !priv->plat->use_phy_wol;
+ !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
@@ -6920,7 +7098,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
- priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q_en =
+ (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
/* Run HW quirks, if any */
@@ -7160,12 +7339,18 @@ int stmmac_dvr_probe(struct device *device,
priv->device = device;
priv->dev = ndev;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
- priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
+ priv->plat->dma_cfg->multi_msi_en =
+ (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
@@ -7249,7 +7434,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features |= NETIF_F_HW_TC;
}
- if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
if (priv->plat->has_gmac4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
@@ -7257,7 +7442,8 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
+ if (priv->dma_cap.sphen &&
+ !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph_cap = true;
priv->sph = priv->sph_cap;
@@ -7315,6 +7501,8 @@ int stmmac_dvr_probe(struct device *device,
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ priv->xstats.threshold = tc;
+
/* Initialize RSS */
rxq = priv->plat->rx_queues_to_use;
netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
@@ -7621,7 +7809,8 @@ int stmmac_resume(struct device *dev)
stmmac_mdio_reset(priv->mii);
}
- if (!priv->plat->serdes_up_after_phy_linkup && priv->plat->serdes_powerup) {
+ if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
+ priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(ndev,
priv->plat->bsp_priv);