Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r-- | drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1037
1 file changed, 496 insertions, 541 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7c6aef033a45..f867d6f06849 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -43,6 +43,7 @@ #include <net/pkt_cls.h> #include <net/xdp_sock_drv.h> #include "stmmac_ptp.h" +#include "stmmac_fpe.h" #include "stmmac.h" #include "stmmac_xdp.h" #include <linux/reset.h> @@ -76,7 +77,6 @@ module_param(phyaddr, int, 0444); MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4) -#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4) /* Limit to make sure XDP TX and slow path can coexist */ #define STMMAC_XSK_TX_BUDGET_MAX 256 @@ -88,13 +88,13 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_XDP_TX BIT(1) #define STMMAC_XDP_REDIRECT BIT(2) -static int flow_ctrl = FLOW_AUTO; +static int flow_ctrl = 0xdead; module_param(flow_ctrl, int, 0644); -MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)"); static int pause = PAUSE_TIME; module_param(pause, int, 0644); -MODULE_PARM_DESC(pause, "Flow Control Pause Time"); +MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)"); #define TC_DEFAULT 64 static int tc = TC_DEFAULT; @@ -106,15 +106,13 @@ static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, 0644); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); -#define STMMAC_RX_COPYBREAK 256 - static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); #define STMMAC_DEFAULT_LPI_TIMER 1000 -static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; -module_param(eee_timer, int, 0644); +static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER; +module_param(eee_timer, uint, 0644); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x)) @@ -180,6 +178,38 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); /** + * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock + * @bsp_priv: BSP private data structure (unused) + * @clk_tx_i: the transmit clock + * @interface: the selected interface mode + * @speed: the speed that the MAC will be operating at + * + * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps, + * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least + * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into + * the plat_data->set_clk_tx_rate method directly, call it via their own + * implementation, or implement their own method should they have more + * complex requirements. It is intended to only be used in this method. + * + * plat_data->clk_tx_i must be filled in. + */ +int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) +{ + long rate = rgmii_clock(speed); + + /* Silently ignore unsupported speeds as rgmii_clock() only + * supports 10, 100 and 1000Mbps. We do not want to spit + * errors for 2500 and higher speeds here. + */ + if (rate < 0) + return 0; + + return clk_set_rate(clk_tx_i, rate); +} +EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate); + +/** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of * errors. 
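A usage sketch for the new helper: a platform glue driver is expected to fill in plat_data->clk_tx_i and point plat_data->set_clk_tx_rate at this function from its probe routine. The following is a minimal, hypothetical example; the dwmac_glue_probe() name and the "tx" clock-name lookup are assumptions for illustration, not part of this change:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>

static int dwmac_glue_probe(struct platform_device *pdev,
			    struct plat_stmmacenet_data *plat)
{
	/* Obtain the MAC transmit clock (clock name "tx" is assumed
	 * here) and let the generic helper retune it to 2.5/25/125MHz
	 * on each link-up.
	 */
	plat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
	if (IS_ERR(plat->clk_tx_i))
		return PTR_ERR(plat->clk_tx_i);

	plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
	return 0;
}

Because rgmii_clock() returns a negative value for anything other than 10/100/1000Mbps, the helper deliberately treats 2.5G and faster links as a no-op rather than an error.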
@@ -190,14 +220,11 @@ static void stmmac_verify_args(void) watchdog = TX_TIMEO; if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) buf_sz = DEFAULT_BUFSIZE; - if (unlikely(flow_ctrl > 1)) - flow_ctrl = FLOW_AUTO; - else if (likely(flow_ctrl < 0)) - flow_ctrl = FLOW_OFF; if (unlikely((pause < 0) || (pause > 0xffff))) pause = PAUSE_TIME; - if (eee_timer < 0) - eee_timer = STMMAC_DEFAULT_LPI_TIMER; + + if (flow_ctrl != 0xdead) + pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n"); } static void __stmmac_disable_all_queues(struct stmmac_priv *priv) @@ -300,7 +327,7 @@ static void stmmac_global_err(struct stmmac_priv *priv) */ static void stmmac_clk_csr_set(struct stmmac_priv *priv) { - u32 clk_rate; + unsigned long clk_rate; clk_rate = clk_get_rate(priv->plat->stmmac_clk); @@ -324,6 +351,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_150_250M; else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; + else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M)) + priv->clk_csr = STMMAC_CSR_300_500M; + else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M)) + priv->clk_csr = STMMAC_CSR_500_800M; } if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { @@ -390,23 +421,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) return dirty; } -static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) -{ - int tx_lpi_timer; - - /* Clear/set the SW EEE timer flag based on LPI ET enablement */ - priv->eee_sw_timer_en = en ? 0 : 1; - tx_lpi_timer = en ? priv->tx_lpi_timer : 0; - stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); -} - -/** - * stmmac_enable_eee_mode - check and enter in LPI mode - * @priv: driver private structure - * Description: this function is to verify and enter in LPI mode in case of - * EEE. - */ -static int stmmac_enable_eee_mode(struct stmmac_priv *priv) +static bool stmmac_eee_tx_busy(struct stmmac_priv *priv) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 queue; @@ -416,31 +431,46 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv) struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; if (tx_q->dirty_tx != tx_q->cur_tx) - return -EBUSY; /* still unfinished work */ + return true; /* still unfinished work */ } - /* Check and enter in LPI mode */ - if (!priv->tx_path_in_lpi_mode) - stmmac_set_eee_mode(priv, priv->hw, - priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); - return 0; + return false; +} + +static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv) +{ + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); } /** - * stmmac_disable_eee_mode - disable and exit from LPI mode + * stmmac_try_to_start_sw_lpi - check and enter in LPI mode * @priv: driver private structure - * Description: this function is to exit and disable EEE in case of - * LPI state is true. This is called by the xmit. + * Description: this function is to verify and enter in LPI mode in case of + * EEE. 
*/ -void stmmac_disable_eee_mode(struct stmmac_priv *priv) +static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv) { - if (!priv->eee_sw_timer_en) { - stmmac_lpi_entry_timer_config(priv, 0); + if (stmmac_eee_tx_busy(priv)) { + stmmac_restart_sw_lpi_timer(priv); return; } - stmmac_reset_eee_mode(priv, priv->hw); + /* Check and enter in LPI mode */ + if (!priv->tx_path_in_lpi_mode) + stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED, + priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING, + 0); +} + +/** + * stmmac_stop_sw_lpi - stop transmitting LPI + * @priv: driver private structure + * Description: When using software-controlled LPI, stop transmitting LPI state. + */ +static void stmmac_stop_sw_lpi(struct stmmac_priv *priv) +{ del_timer_sync(&priv->eee_ctrl_timer); + stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); priv->tx_path_in_lpi_mode = false; } @@ -455,74 +485,7 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t) { struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); - if (stmmac_enable_eee_mode(priv)) - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); -} - -/** - * stmmac_eee_init - init EEE - * @priv: driver private structure - * Description: - * if the GMAC supports the EEE (from the HW cap reg) and the phy device - * can also manage EEE, this function enable the LPI state and start related - * timer. - */ -bool stmmac_eee_init(struct stmmac_priv *priv) -{ - int eee_tw_timer = priv->eee_tw_timer; - - /* Using PCS we cannot dial with the phy registers at this stage - * so we do not support extra feature like EEE. - */ - if (priv->hw->pcs == STMMAC_PCS_TBI || - priv->hw->pcs == STMMAC_PCS_RTBI) - return false; - - /* Check if MAC core supports the EEE feature. */ - if (!priv->dma_cap.eee) - return false; - - mutex_lock(&priv->lock); - - /* Check if it needs to be deactivated */ - if (!priv->eee_active) { - if (priv->eee_enabled) { - netdev_dbg(priv->dev, "disable EEE\n"); - stmmac_lpi_entry_timer_config(priv, 0); - del_timer_sync(&priv->eee_ctrl_timer); - stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); - if (priv->hw->xpcs) - xpcs_config_eee(priv->hw->xpcs, - priv->plat->mult_fact_100ns, - false); - } - mutex_unlock(&priv->lock); - return false; - } - - if (priv->eee_active && !priv->eee_enabled) { - timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); - stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, - eee_tw_timer); - if (priv->hw->xpcs) - xpcs_config_eee(priv->hw->xpcs, - priv->plat->mult_fact_100ns, - true); - } - - if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { - del_timer_sync(&priv->eee_ctrl_timer); - priv->tx_path_in_lpi_mode = false; - stmmac_lpi_entry_timer_config(priv, 1); - } else { - stmmac_lpi_entry_timer_config(priv, 0); - mod_timer(&priv->eee_ctrl_timer, - STMMAC_LPI_T(priv->tx_lpi_timer)); - } - - mutex_unlock(&priv->lock); - netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); - return true; + stmmac_try_to_start_sw_lpi(priv); } /* stmmac_get_tx_hwtstamp - get HW TX timestamps @@ -926,26 +889,45 @@ static void stmmac_release_ptp(struct stmmac_priv *priv) * stmmac_mac_flow_ctrl - Configure flow control in all queues * @priv: driver private structure * @duplex: duplex passed to the next function + * @flow_ctrl: desired flow control modes * Description: It is used for configuring the flow control in all queues */ -static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) +static void stmmac_mac_flow_ctrl(struct stmmac_priv 
*priv, u32 duplex, + unsigned int flow_ctrl) { u32 tx_cnt = priv->plat->tx_queues_to_use; - stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, - priv->pause, tx_cnt); + stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time, + tx_cnt); +} + +static unsigned long stmmac_mac_get_caps(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + /* Refresh the MAC-specific capabilities */ + stmmac_mac_update_caps(priv); + + config->mac_capabilities = priv->hw->link.caps; + + if (priv->plat->max_speed) + phylink_limit_mac_speed(config, priv->plat->max_speed); + + return config->mac_capabilities; } static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + struct phylink_pcs *pcs; - if (priv->hw->xpcs) - return &priv->hw->xpcs->pcs; - - if (priv->hw->lynx_pcs) - return priv->hw->lynx_pcs; + if (priv->plat->select_pcs) { + pcs = priv->plat->select_pcs(priv, interface); + if (!IS_ERR(pcs)) + return pcs; + } return NULL; } @@ -956,34 +938,16 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, /* Nothing to do, xpcs_config() handles everything */ } -static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up) -{ - struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; - enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; - enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; - bool *hs_enable = &fpe_cfg->hs_enable; - - if (is_up && *hs_enable) { - stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, - MPACKET_VERIFY); - } else { - *lo_state = FPE_STATE_OFF; - *lp_state = FPE_STATE_OFF; - } -} - static void stmmac_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); stmmac_mac_set(priv, priv->ioaddr, false); - priv->eee_active = false; - priv->tx_lpi_enabled = false; - priv->eee_enabled = stmmac_eee_init(priv); - stmmac_set_eee_pls(priv, priv->hw, false); + if (priv->dma_cap.eee) + stmmac_set_eee_pls(priv, priv->hw, false); - if (priv->dma_cap.fpesel) + if (stmmac_fpe_supported(priv)) stmmac_fpe_link_state_handle(priv, false); } @@ -994,7 +958,9 @@ static void stmmac_mac_link_up(struct phylink_config *config, bool tx_pause, bool rx_pause) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + unsigned int flow_ctrl; u32 old_ctrl, ctrl; + int ret; if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && priv->plat->serdes_powerup) @@ -1074,41 +1040,105 @@ static void stmmac_mac_link_up(struct phylink_config *config, /* Flow Control operation */ if (rx_pause && tx_pause) - priv->flow_ctrl = FLOW_AUTO; + flow_ctrl = FLOW_AUTO; else if (rx_pause && !tx_pause) - priv->flow_ctrl = FLOW_RX; + flow_ctrl = FLOW_RX; else if (!rx_pause && tx_pause) - priv->flow_ctrl = FLOW_TX; + flow_ctrl = FLOW_TX; else - priv->flow_ctrl = FLOW_OFF; + flow_ctrl = FLOW_OFF; - stmmac_mac_flow_ctrl(priv, duplex); + stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl); if (ctrl != old_ctrl) writel(ctrl, priv->ioaddr + MAC_CTRL_REG); + if (priv->plat->set_clk_tx_rate) { + ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv, + priv->plat->clk_tx_i, + interface, speed); + if (ret < 0) + netdev_err(priv->dev, + "failed to configure transmit clock for %dMbps: %pe\n", + speed, ERR_PTR(ret)); + } + stmmac_mac_set(priv, priv->ioaddr, true); - if (phy 
&& priv->dma_cap.eee) { - priv->eee_active = - phy_init_eee(phy, !(priv->plat->flags & - STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0; - priv->eee_enabled = stmmac_eee_init(priv); - priv->tx_lpi_enabled = priv->eee_enabled; + if (priv->dma_cap.eee) stmmac_set_eee_pls(priv, priv->hw, true); - } - if (priv->dma_cap.fpesel) + if (stmmac_fpe_supported(priv)) stmmac_fpe_link_state_handle(priv, true); if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) stmmac_hwtstamp_correct_latency(priv, priv); } +static void stmmac_mac_disable_tx_lpi(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + priv->eee_active = false; + + mutex_lock(&priv->lock); + + priv->eee_enabled = false; + + netdev_dbg(priv->dev, "disable EEE\n"); + priv->eee_sw_timer_en = false; + del_timer_sync(&priv->eee_ctrl_timer); + stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); + priv->tx_path_in_lpi_mode = false; + + stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS); + mutex_unlock(&priv->lock); +} + +static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, + phy_interface_t interface, u32 timer, + bool tx_clk_stop) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + int ret; + + priv->tx_lpi_timer = timer; + priv->eee_active = true; + + mutex_lock(&priv->lock); + + priv->eee_enabled = true; + + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, + STMMAC_DEFAULT_TWT_LS); + + /* Try to configure the hardware timer. */ + ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER, + priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING, + priv->tx_lpi_timer); + + if (ret) { + /* Hardware timer mode not supported, or value out of range. + * Fall back to using software LPI mode + */ + priv->eee_sw_timer_en = true; + stmmac_restart_sw_lpi_timer(priv); + } + + mutex_unlock(&priv->lock); + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); + + return 0; +} + static const struct phylink_mac_ops stmmac_phylink_mac_ops = { + .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, .mac_link_down = stmmac_mac_link_down, .mac_link_up = stmmac_mac_link_up, + .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi, + .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi, }; /** @@ -1187,6 +1217,21 @@ static int stmmac_init_phy(struct net_device *dev) ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); } + if (ret == 0) { + struct ethtool_keee eee; + + /* Configure phylib's copy of the LPI timer. Normally, + * phylink_config.lpi_timer_default would do this, but there is + * a chance that userspace could change the eee_timer setting + * via sysfs before the first open. Thus, preserve existing + * behaviour.
+ */ + if (!phylink_ethtool_get_eee(priv->phylink, &eee)) { + eee.tx_lpi_timer = priv->tx_lpi_timer; + phylink_ethtool_set_eee(priv->phylink, &eee); + } + } + if (!priv->plat->pmt) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; @@ -1198,33 +1243,28 @@ static int stmmac_init_phy(struct net_device *dev) return ret; } -static void stmmac_set_half_duplex(struct stmmac_priv *priv) -{ - /* Half-Duplex can only work with single tx queue */ - if (priv->plat->tx_queues_to_use > 1) - priv->phylink_config.mac_capabilities &= - ~(MAC_10HD | MAC_100HD | MAC_1000HD); - else - priv->phylink_config.mac_capabilities |= - (MAC_10HD | MAC_100HD | MAC_1000HD); -} - static int stmmac_phy_setup(struct stmmac_priv *priv) { struct stmmac_mdio_bus_data *mdio_bus_data; int mode = priv->plat->phy_interface; struct fwnode_handle *fwnode; + struct phylink_pcs *pcs; struct phylink *phylink; - int max_speed; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; priv->phylink_config.mac_managed_pm = true; + /* Stmmac always requires an RX clock for hardware initialization */ + priv->phylink_config.mac_requires_rxc = true; + + if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) + priv->phylink_config.eee_rx_clk_stop_enable = true; + mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) - priv->phylink_config.ovr_an_inband = - mdio_bus_data->xpcs_an_inband; + priv->phylink_config.default_an_inband = + mdio_bus_data->default_an_inband; /* Set the platform/firmware specified interface mode. Note, phylink * deals with the PHY interface mode, not the MAC interface mode. @@ -1233,21 +1273,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) /* If we have an xpcs, it defines which PHY interfaces are supported. */ if (priv->hw->xpcs) - xpcs_get_interfaces(priv->hw->xpcs, - priv->phylink_config.supported_interfaces); - - priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10FD | MAC_100FD | - MAC_1000FD; + pcs = xpcs_to_phylink_pcs(priv->hw->xpcs); + else + pcs = priv->hw->phylink_pcs; - stmmac_set_half_duplex(priv); + if (pcs) + phy_interface_or(priv->phylink_config.supported_interfaces, + priv->phylink_config.supported_interfaces, + pcs->supported_interfaces); - /* Get the MAC specific capabilities */ - stmmac_mac_phylink_get_caps(priv); + if (priv->dma_cap.eee) { + /* Assume all supported interfaces also support LPI */ + memcpy(priv->phylink_config.lpi_interfaces, + priv->phylink_config.supported_interfaces, + sizeof(priv->phylink_config.lpi_interfaces)); - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); + /* All full duplex speeds above 100Mbps are supported */ + priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) | + MAC_100FD; + priv->phylink_config.lpi_timer_default = eee_timer * 1000; + priv->phylink_config.eee_enabled_default = true; + } fwnode = priv->plat->port_node; if (!fwnode) @@ -1330,6 +1376,14 @@ static void stmmac_display_rings(struct stmmac_priv *priv, stmmac_display_tx_rings(priv, dma_conf); } +static unsigned int stmmac_rx_offset(struct stmmac_priv *priv) +{ + if (stmmac_xdp_is_enabled(priv)) + return XDP_PACKET_HEADROOM; + + return NET_SKB_PAD; +} + static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; @@ -2026,22 +2080,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_channel *ch = &priv->channel[queue]; bool xdp_prog = stmmac_xdp_is_enabled(priv); struct page_pool_params pp_params = { 0 }; - 
unsigned int num_pages; + unsigned int dma_buf_sz_pad, num_pages; unsigned int napi_id; int ret; + dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE); + rx_q->queue_index = queue; rx_q->priv_data = priv; + rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE; pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; pp_params.pool_size = dma_conf->dma_rx_size; - num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); - pp_params.order = ilog2(num_pages); + pp_params.order = order_base_2(num_pages); pp_params.nid = dev_to_node(priv->device); pp_params.dev = priv->device; pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; pp_params.offset = stmmac_rx_offset(priv); - pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages); + pp_params.max_len = dma_conf->dma_buf_sz; + + if (priv->sph) { + pp_params.offset = 0; + pp_params.max_len += stmmac_rx_offset(priv); + } rx_q->page_pool = page_pool_create(&pp_params); if (IS_ERR(rx_q->page_pool)) { @@ -2378,9 +2441,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) if (txfifosz == 0) txfifosz = priv->dma_cap.tx_fifo_size; - /* Adjust for real per queue fifo size */ - rxfifosz /= rx_channels_count; - txfifosz /= tx_channels_count; + /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */ + if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; + } if (priv->plat->force_thresh_dma_mode) { txmode = tc; @@ -2506,6 +2571,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) if (!xsk_tx_peek_desc(pool, &xdp_desc)) break; + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + xdp_desc.len > priv->est->max_sdu[queue]) { + priv->xstats.max_sdu_txq_drop[queue]++; + continue; + } + if (likely(priv->extend_desc)) tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -2557,7 +2629,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) true, priv->mode, true, true, xdp_desc.len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); xsk_tx_metadata_to_compl(meta, &tx_q->tx_skbuff_dma[entry].xsk_meta); @@ -2771,11 +2843,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, xmits = budget; } - if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && - priv->eee_sw_timer_en) { - if (stmmac_enable_eee_mode(priv)) - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); - } + if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode) + stmmac_restart_sw_lpi_timer(priv); /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) @@ -2916,7 +2985,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) u32 channels_to_check = tx_channel_count > rx_channel_count ? tx_channel_count : rx_channel_count; u32 chan; - int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; + int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; /* Make sure we never check beyond our status buffer. 
*/ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) @@ -3007,25 +3076,24 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; u32 chan = 0; - int atds = 0; int ret = 0; if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { - dev_err(priv->device, "Invalid DMA configuration\n"); + netdev_err(priv->dev, "Invalid DMA configuration\n"); return -EINVAL; } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) - atds = 1; + priv->plat->dma_cfg->atds = 1; ret = stmmac_reset(priv, priv->ioaddr); if (ret) { - dev_err(priv->device, "Failed to reset the dma\n"); + netdev_err(priv->dev, "Failed to reset the dma\n"); return ret; } /* DMA Configuration */ - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); @@ -3361,27 +3429,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) } } -static int stmmac_fpe_start_wq(struct stmmac_priv *priv) -{ - char *name; - - clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); - clear_bit(__FPE_REMOVING, &priv->fpe_task_state); - - name = priv->wq_name; - sprintf(name, "%s-fpe", priv->dev->name); - - priv->fpe_wq = create_singlethread_workqueue(name); - if (!priv->fpe_wq) { - netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); - - return -ENOMEM; - } - netdev_info(priv->dev, "FPE workqueue start"); - - return 0; -} - /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. @@ -3404,6 +3451,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) u32 chan; int ret; + /* Make sure RX clock is enabled */ + if (priv->hw->phylink_pcs) + phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); + /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { @@ -3444,9 +3495,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) priv->hw->rx_csum = 0; } - /* Enable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, true); - /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -3468,12 +3516,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) else if (ptp_register) stmmac_ptp_register(priv); - priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; - - /* Convert the timer from msec to usec */ - if (!priv->tx_lpi_timer) - priv->tx_lpi_timer = eee_timer * 1000; - if (priv->use_riwt) { u32 queue; @@ -3532,13 +3574,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) stmmac_set_hw_vlan_mode(priv, priv->hw); - if (priv->dma_cap.fpesel) { - stmmac_fpe_start_wq(priv); - - if (priv->plat->fpe_cfg->enable) - stmmac_fpe_handshake(priv, true); - } - return 0; } @@ -3591,6 +3626,10 @@ static void stmmac_free_irq(struct net_device *dev, if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) free_irq(priv->wol_irq, dev); fallthrough; + case REQ_IRQ_ERR_SFTY: + if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) + free_irq(priv->sfty_irq, dev); + fallthrough; case REQ_IRQ_ERR_WOL: free_irq(dev->irq, dev); fallthrough; @@ -3661,6 +3700,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) } } + /* Request the common Safety Feature Correctible/Uncorrectible + * Error line in case another line is used + */ + if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { + int_name = priv->int_name_sfty; + sprintf(int_name, "%s:%s", dev->name, "safety"); + ret =
request_irq(priv->sfty_irq, stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty MSI %d (error: %d)\n", + __func__, priv->sfty_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY; + goto irq_error; + } + } + /* Request the Safety Feature Correctible Error line in * case of another line is used */ @@ -3773,6 +3829,7 @@ static int stmmac_request_irq_single(struct net_device *dev) /* Request the Wake IRQ in case of another line * is used for WoL */ + priv->wol_irq_disabled = true; if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { ret = request_irq(priv->wol_irq, stmmac_interrupt, IRQF_SHARED, dev->name, dev); @@ -3798,6 +3855,21 @@ } } + /* Request the common Safety Feature Correctible/Uncorrectible + * Error line in case another line is used + */ + if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { + ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the sfty IRQ %d (%d)\n", + __func__, priv->sfty_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY; + goto irq_error; + } + } + return 0; irq_error: @@ -3910,15 +3982,16 @@ static int __stmmac_open(struct net_device *dev, u32 chan; int ret; + /* Initialise the tx lpi timer, converting from msec to usec */ + if (!priv->tx_lpi_timer) + priv->tx_lpi_timer = eee_timer * 1000; + ret = pm_runtime_resume_and_get(priv->device); if (ret < 0) return ret; - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI && - (!priv->hw->xpcs || - xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && - !priv->hw->lynx_pcs) { + if ((!priv->hw->xpcs || + xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, @@ -3928,8 +4001,6 @@ } } - priv->rx_copybreak = STMMAC_RX_COPYBREAK; - buf_sz = dma_conf->dma_buf_sz; for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) @@ -4002,18 +4073,6 @@ static int stmmac_open(struct net_device *dev) return ret; } -static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) -{ - set_bit(__FPE_REMOVING, &priv->fpe_task_state); - - if (priv->fpe_wq) { - destroy_workqueue(priv->fpe_wq); - priv->fpe_wq = NULL; - } - - netdev_info(priv->dev, "FPE workqueue stop"); -} - /** * stmmac_release - close entry point of the driver * @dev : device pointer.
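On the platform side, the res->sfty_irq consumed by the IRQ request paths above comes from the glue layer's resource lookup. A hedged sketch of how that lookup might look, assuming a "sfty" interrupt-names entry (surrounding probe code elided):

	/* The safety IRQ is optional: when no dedicated line exists,
	 * sfty_irq stays 0 and the common interrupt handler polls the
	 * safety status itself (see the priv->sfty_irq <= 0 check in
	 * stmmac_interrupt() further down in this diff).
	 */
	res->sfty_irq = platform_get_irq_byname_optional(pdev, "sfty");
	if (res->sfty_irq < 0) {
		if (res->sfty_irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		res->sfty_irq = 0;
	}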
@@ -4041,32 +4100,22 @@ static int stmmac_release(struct net_device *dev) /* Free the IRQ lines */ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); - if (priv->eee_enabled) { - priv->tx_path_in_lpi_mode = false; - del_timer_sync(&priv->eee_ctrl_timer); - } - /* Stop TX/RX DMA and clear the descriptors */ stmmac_stop_all_dma(priv); /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv, &priv->dma_conf); - /* Disable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, false); - /* Powerdown Serdes if there is */ if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); - netif_carrier_off(dev); - stmmac_release_ptp(priv); - pm_runtime_put(priv->device); + if (stmmac_fpe_supported(priv)) + timer_shutdown_sync(&priv->fpe_cfg.verify_timer); - if (priv->dma_cap.fpesel) - stmmac_fpe_stop_wq(priv); + pm_runtime_put(priv->device); return 0; } @@ -4136,11 +4185,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, desc = &tx_q->dma_tx[tx_q->cur_tx]; curr_addr = des + (total_len - tmp_len); - if (priv->dma_cap.addr64 <= 32) - desc->des0 = cpu_to_le32(curr_addr); - else - stmmac_set_desc_addr(priv, desc, curr_addr); - + stmmac_set_desc_addr(priv, desc, curr_addr); buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? TSO_MAX_BUFF_SIZE : tmp_len; @@ -4186,17 +4231,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) * First Descriptor * -------- * | DES0 |---> buffer1 = L2/L3/L4 header - * | DES1 |---> TCP Payload (can continue on next descr...) - * | DES2 |---> buffer 1 and 2 len + * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address + * | | width is 32-bit, but we never use it. + * | | Also can be used as the most-significant 8-bits or 16-bits of + * | | buffer1 address pointer if the DMA AXI address width is 40-bit + * | | or 48-bit, and we always use it. + * | DES2 |---> buffer1 len * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] * -------- + * -------- + * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...) + * | DES1 |---> same as the First Descriptor + * | DES2 |---> buffer1 len + * | DES3 | + * -------- * | * ... * | * -------- - * | DES0 | --| Split TCP Payload on Buffers 1 and 2 - * | DES1 | --| - * | DES2 | --> buffer 1 and 2 len + * | DES0 |---> buffer1 = Split TCP Payload + * | DES1 |---> same as the First Descriptor + * | DES2 |---> buffer1 len * | DES3 | * -------- * @@ -4206,17 +4261,30 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); - int nfrags = skb_shinfo(skb)->nr_frags; - u32 queue = skb_get_queue_mapping(skb); unsigned int first_entry, tx_packets; struct stmmac_txq_stats *txq_stats; - int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; - bool has_vlan, set_ic; + u32 pay_len, mss, queue; + int i, first_tx, nfrags; u8 proto_hdr_len, hdr; - u32 pay_len, mss; dma_addr_t des; - int i; + bool set_ic; + + /* Always insert VLAN tag to SKB payload for TSO frames. + * + * Never insert VLAN tag by HW, since segments split by + * TSO engine will be un-tagged by mistake.
+ */ + if (skb_vlan_tag_present(skb)) { + skb = __vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) { + priv->xstats.tx_dropped++; + return NETDEV_TX_OK; + } + } + + nfrags = skb_shinfo(skb)->nr_frags; + queue = skb_get_queue_mapping(skb); tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; @@ -4270,9 +4338,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) skb->data_len); } - /* Check if VLAN can be inserted by HW */ - has_vlan = stmmac_vlan_insert(priv, skb, tx_q); - first_entry = tx_q->cur_tx; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -4282,37 +4347,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) desc = &tx_q->dma_tx[first_entry]; first = desc; - if (has_vlan) - stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); - /* first descriptor: fill Headers on Buf1 */ des = dma_map_single(priv->device, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(priv->device, des)) goto dma_map_err; - tx_q->tx_skbuff_dma[first_entry].buf = des; - tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); - tx_q->tx_skbuff_dma[first_entry].map_as_page = false; - tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; - - if (priv->dma_cap.addr64 <= 32) { - first->des0 = cpu_to_le32(des); - - /* Fill start of payload in buff2 of first descriptor */ - if (pay_len) - first->des1 = cpu_to_le32(des + proto_hdr_len); - - /* If needed take extra descriptors to fill the remaining payload */ - tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; - } else { - stmmac_set_desc_addr(priv, first, des); - tmp_pay_len = pay_len; - des += proto_hdr_len; - pay_len = 0; - } - - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + stmmac_set_desc_addr(priv, first, des); + stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len, + (nfrags == 0), queue); + + /* In case two or more DMA transmit descriptors are allocated for this + * non-paged SKB data, the DMA buffer address should be saved to + * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, + * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee + * that stmmac_tx_clean() does not unmap the entire DMA buffer too early + * since the tail areas of the DMA buffer can be accessed by DMA engine + * sooner or later. + * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf + * corresponding to the last descriptor, stmmac_tx_clean() will unmap + * this DMA buffer right after the DMA engine completely finishes the + * full buffer transmission. 
+ */ + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; /* Prepare fragments */ for (i = 0; i < nfrags; i++) { @@ -4399,11 +4459,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) } /* Complete the first descriptor before granting the DMA */ - stmmac_prepare_tso_tx_desc(priv, first, 1, - proto_hdr_len, - pay_len, - 1, tx_q->tx_skbuff_dma[first_entry].last_segment, - hdr / 4, (skb->len - proto_hdr_len)); + stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1, + tx_q->tx_skbuff_dma[first_entry].last_segment, + hdr / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ if (mss_desc) { @@ -4490,7 +4548,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first_tx = tx_q->cur_tx; if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) - stmmac_disable_eee_mode(priv); + stmmac_stop_sw_lpi(priv); /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { @@ -4500,6 +4558,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return stmmac_tso_xmit(skb, dev); } + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + skb->len > priv->est->max_sdu[queue]) { + priv->xstats.max_sdu_txq_drop[queue]++; + goto max_sdu_err; + } + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, @@ -4708,7 +4773,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -4717,6 +4782,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dma_map_err: netdev_err(priv->dev, "Tx DMA map failed\n"); +max_sdu_err: dev_kfree_skb(skb); priv->xstats.tx_dropped++; return NETDEV_TX_OK; @@ -4873,6 +4939,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) return STMMAC_XDP_CONSUMED; + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + xdpf->len > priv->est->max_sdu[queue]) { + priv->xstats.max_sdu_txq_drop[queue]++; + return STMMAC_XDP_CONSUMED; + } + if (likely(priv->extend_desc)) tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -4927,7 +5000,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, u64_stats_update_end(&txq_stats->q_syncp); } - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; @@ -5051,9 +5124,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; - skb = __napi_alloc_skb(&ch->rxtx_napi, - xdp->data_end - xdp->data_hard_start, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start); if (unlikely(!skb)) return NULL; @@ -5309,7 +5381,7 @@ read_again: /* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len; - xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); + xsk_buff_dma_sync_for_cpu(buf->xdp); prog = READ_ONCE(priv->xdp_prog); res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); @@ -5457,7 +5529,7 @@ read_again: if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { - page_pool_recycle_direct(rx_q->page_pool, buf->page); + page_pool_put_page(rx_q->page_pool, buf->page, 0, true); buf->page = NULL; error = 1; if (!priv->hwts_rx_en) @@ -5475,10 +5547,6 @@ read_again: /* Buffer is good. Go on. */ - prefetch(page_address(buf->page) + buf->page_offset); - if (buf->sec_page) - prefetch(page_address(buf->sec_page)); - buf1_len = stmmac_rx_buf1_len(priv, p, status, len); len += buf1_len; buf2_len = stmmac_rx_buf2_len(priv, p, status, len); @@ -5500,6 +5568,8 @@ read_again: dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); + net_prefetch(page_address(buf->page) + + buf->page_offset); xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); xdp_prepare_buff(&ctx.xdp, page_address(buf->page), @@ -5553,22 +5623,26 @@ read_again: } if (!skb) { + unsigned int head_pad_len; + /* XDP program may expand or reduce tail */ buf1_len = ctx.xdp.data_end - ctx.xdp.data; - skb = napi_alloc_skb(&ch->rx_napi, buf1_len); + skb = napi_build_skb(page_address(buf->page), + rx_q->napi_skb_frag_size); if (!skb) { + page_pool_recycle_direct(rx_q->page_pool, + buf->page); rx_dropped++; count++; goto drain_data; } /* XDP program may adjust header */ - skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); + head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start; + skb_reserve(skb, head_pad_len); skb_put(skb, buf1_len); - - /* Data payload copied into SKB, page ready for recycle */ - page_pool_recycle_direct(rx_q->page_pool, buf->page); + skb_mark_for_recycle(skb); buf->page = NULL; } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, @@ -5576,9 +5650,6 @@ read_again: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page, buf->page_offset, buf1_len, priv->dma_conf.dma_buf_sz); - - /* Data payload appended into SKB */ - skb_mark_for_recycle(skb); buf->page = NULL; } @@ -5588,9 +5659,6 @@ read_again: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_conf.dma_buf_sz); - - /* Data payload appended into SKB */ - skb_mark_for_recycle(skb); buf->sec_page = NULL; } @@ -5857,7 +5925,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) stmmac_set_rx_mode(dev); } - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); netdev_update_features(dev); return 0; @@ -5926,49 +5994,6 @@ static int stmmac_set_features(struct net_device *netdev, return 0; } -static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) -{ - struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; - enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; - enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; - bool *hs_enable = &fpe_cfg->hs_enable; - - if (status == FPE_EVENT_UNKNOWN || !*hs_enable) - return; - - /* If LP has sent verify mPacket, LP is FPE capable */ - if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { - if (*lp_state < FPE_STATE_CAPABLE) - *lp_state = FPE_STATE_CAPABLE; - - /* If user has requested FPE enable, quickly response */ - if (*hs_enable) - stmmac_fpe_send_mpacket(priv, priv->ioaddr, - fpe_cfg, - MPACKET_RESPONSE); - } - - /* If Local has sent verify mPacket, Local is FPE capable */ - if ((status & FPE_EVENT_TVER) == 
FPE_EVENT_TVER) { - if (*lo_state < FPE_STATE_CAPABLE) - *lo_state = FPE_STATE_CAPABLE; - } - - /* If LP has sent response mPacket, LP is entering FPE ON */ - if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) - *lp_state = FPE_STATE_ENTERING_ON; - - /* If Local has sent response mPacket, Local is entering FPE ON */ - if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) - *lo_state = FPE_STATE_ENTERING_ON; - - if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && - !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && - priv->fpe_wq) { - queue_work(priv->fpe_wq, &priv->fpe_task); - } -} - static void stmmac_common_interrupt(struct stmmac_priv *priv) { u32 rx_cnt = priv->plat->rx_queues_to_use; @@ -5987,12 +6012,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) stmmac_est_irq_status(priv, priv, priv->dev, &priv->xstats, tx_cnt); - if (priv->dma_cap.fpesel) { - int status = stmmac_fpe_irq_status(priv, priv->ioaddr, - priv->dev); - - stmmac_fpe_event_status(priv, status); - } + if (stmmac_fpe_supported(priv)) + stmmac_fpe_irq_status(priv); /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || xmac) { @@ -6006,10 +6027,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) priv->tx_path_in_lpi_mode = false; } - for (queue = 0; queue < queues_count; queue++) { - status = stmmac_host_mtl_irq_status(priv, priv->hw, - queue); - } + for (queue = 0; queue < queues_count; queue++) + stmmac_host_mtl_irq_status(priv, priv->hw, queue); /* PCS link status */ if (priv->hw->pcs && @@ -6044,8 +6063,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; - /* Check if a fatal error happened */ - if (stmmac_safety_feat_interrupt(priv)) + /* Check ASP error if it isn't delivered via an individual IRQ */ + if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) return IRQ_HANDLED; /* To handle Common interrupts */ @@ -6205,6 +6224,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, switch (type) { case TC_QUERY_CAPS: return stmmac_tc_query_caps(priv, priv, type_data); + case TC_SETUP_QDISC_MQPRIO: + return stmmac_tc_setup_mqprio(priv, priv, type_data); case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &stmmac_block_cb_list, @@ -6520,11 +6541,7 @@ static int stmmac_device_event(struct notifier_block *unused, switch (event) { case NETDEV_CHANGENAME: - if (priv->dbgfs_dir) - priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, - priv->dbgfs_dir, - stmmac_fs_dir, - dev->name); + debugfs_change_name(priv->dbgfs_dir, "%s", dev->name); break; } done: @@ -6590,7 +6607,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le) static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) { u32 crc, hash = 0; - __le16 pmatch = 0; + u16 pmatch = 0; int count = 0; u16 vid = 0; @@ -6605,7 +6622,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) if (count > 2) /* VID = 0 always passes filter */ return -EOPNOTSUPP; - pmatch = cpu_to_le16(vid); + pmatch = vid; hash = 0; } @@ -6853,6 +6870,8 @@ void stmmac_xdp_release(struct net_device *dev) /* Ensure tx function is not running */ netif_tx_disable(dev); + phylink_stop(priv->phylink); + /* Disable NAPI process */ stmmac_disable_all_queues(priv); @@ -6868,14 +6887,10 @@ void stmmac_xdp_release(struct net_device *dev) /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv, &priv->dma_conf); - /* Disable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, false); - /* set 
trans_start so we don't get spurious * watchdogs during reset */ netif_trans_update(dev); - netif_carrier_off(dev); } int stmmac_xdp_open(struct net_device *dev) @@ -6958,25 +6973,25 @@ int stmmac_xdp_open(struct net_device *dev) tx_q->txtimer.function = stmmac_tx_timer; } - /* Enable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, true); - /* Start Rx & Tx DMA Channels */ stmmac_start_all_dma(priv); + phylink_start(priv->phylink); + ret = stmmac_request_irq(dev); if (ret) goto irq_error; /* Enable NAPI process*/ stmmac_enable_all_queues(priv); - netif_carrier_on(dev); netif_tx_start_all_queues(dev); stmmac_enable_all_dma_irq(priv); return 0; irq_error: + phylink_stop(priv->phylink); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); @@ -7206,6 +7221,36 @@ static int stmmac_hw_init(struct stmmac_priv *priv) if (priv->dma_cap.tsoen) dev_info(priv->device, "TSO supported\n"); + if (priv->dma_cap.number_rx_queues && + priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) { + dev_warn(priv->device, + "Number of Rx queues (%u) exceeds dma capability\n", + priv->plat->rx_queues_to_use); + priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues; + } + if (priv->dma_cap.number_tx_queues && + priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) { + dev_warn(priv->device, + "Number of Tx queues (%u) exceeds dma capability\n", + priv->plat->tx_queues_to_use); + priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues; + } + + if (priv->dma_cap.rx_fifo_size && + priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) { + dev_warn(priv->device, + "Rx FIFO size (%u) exceeds dma capability\n", + priv->plat->rx_fifo_size); + priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size; + } + if (priv->dma_cap.tx_fifo_size && + priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) { + dev_warn(priv->device, + "Tx FIFO size (%u) exceeds dma capability\n", + priv->plat->tx_fifo_size); + priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size; + } + priv->hw->vlan_fail_q_en = (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; @@ -7299,7 +7344,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); - stmmac_set_half_duplex(priv); stmmac_napi_add(dev); if (netif_running(dev)) @@ -7325,71 +7369,6 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) return ret; } -#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" -static void stmmac_fpe_lp_task(struct work_struct *work) -{ - struct stmmac_priv *priv = container_of(work, struct stmmac_priv, - fpe_task); - struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; - enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; - enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; - bool *hs_enable = &fpe_cfg->hs_enable; - bool *enable = &fpe_cfg->enable; - int retries = 20; - - while (retries-- > 0) { - /* Bail out immediately if FPE handshake is OFF */ - if (*lo_state == FPE_STATE_OFF || !*hs_enable) - break; - - if (*lo_state == FPE_STATE_ENTERING_ON && - *lp_state == FPE_STATE_ENTERING_ON) { - stmmac_fpe_configure(priv, priv->ioaddr, - fpe_cfg, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, - *enable); - - netdev_info(priv->dev, "configured FPE\n"); - - *lo_state = FPE_STATE_ON; - *lp_state = FPE_STATE_ON; - netdev_info(priv->dev, "!!! 
BOTH FPE stations ON\n"); - break; - } - - if ((*lo_state == FPE_STATE_CAPABLE || - *lo_state == FPE_STATE_ENTERING_ON) && - *lp_state != FPE_STATE_ON) { - netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, - *lo_state, *lp_state); - stmmac_fpe_send_mpacket(priv, priv->ioaddr, - fpe_cfg, - MPACKET_VERIFY); - } - /* Sleep then retry */ - msleep(500); - } - - clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); -} - -void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) -{ - if (priv->plat->fpe_cfg->hs_enable != enable) { - if (enable) { - stmmac_fpe_send_mpacket(priv, priv->ioaddr, - priv->plat->fpe_cfg, - MPACKET_VERIFY); - } else { - priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; - priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; - } - - priv->plat->fpe_cfg->hs_enable = enable; - } -} - static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) { const struct stmmac_xdp_buff *ctx = (void *)_ctx; @@ -7464,7 +7443,7 @@ int stmmac_dvr_probe(struct device *device, return -ENOMEM; stmmac_set_ethtool_ops(ndev); - priv->pause = pause; + priv->pause_time = pause; priv->plat = plat_dat; priv->ioaddr = res->addr; priv->dev->base_addr = (unsigned long)res->addr; @@ -7474,6 +7453,7 @@ int stmmac_dvr_probe(struct device *device, priv->dev->irq = res->irq; priv->wol_irq = res->wol_irq; priv->lpi_irq = res->lpi_irq; + priv->sfty_irq = res->sfty_irq; priv->sfty_ce_irq = res->sfty_ce_irq; priv->sfty_ue_irq = res->sfty_ue_irq; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) @@ -7503,8 +7483,7 @@ int stmmac_dvr_probe(struct device *device, INIT_WORK(&priv->service_task, stmmac_service_task); - /* Initialize Link Partner FPE workqueue */ - INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); + timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); /* Override with kernel parameters if supplied XXX CRS XXX * this needs to have multiple instances @@ -7612,9 +7591,10 @@ int stmmac_dvr_probe(struct device *device, #ifdef STMMAC_VLAN_TAG_USED /* Both mac100 and gmac support receive VLAN tag detection */ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - priv->hw->hw_vlan_en = true; - + if (priv->plat->has_gmac4) { + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + priv->hw->hw_vlan_en = true; + } if (priv->dma_cap.vlhash) { ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; @@ -7639,8 +7619,6 @@ int stmmac_dvr_probe(struct device *device, ndev->features |= NETIF_F_RXHASH; ndev->vlan_features |= ndev->features; - /* TSO doesn't work on VLANs yet */ - ndev->vlan_features &= ~NETIF_F_TSO; /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; @@ -7661,9 +7639,6 @@ int stmmac_dvr_probe(struct device *device, "%s: warning: maxmtu having invalid value (%d)\n", __func__, priv->plat->maxmtu); - if (flow_ctrl) - priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ - ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; /* Setup channels NAPI */ @@ -7671,6 +7646,8 @@ int stmmac_dvr_probe(struct device *device, mutex_init(&priv->lock); + stmmac_fpe_init(priv); + /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be * changed at run-time and it is fixed. 
Viceversa the driver'll try to @@ -7689,26 +7666,20 @@ int stmmac_dvr_probe(struct device *device, if (!pm_runtime_enabled(device)) pm_runtime_enable(device); - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) { - /* MDIO bus Registration */ - ret = stmmac_mdio_register(ndev); - if (ret < 0) { - dev_err_probe(priv->device, ret, - "%s: MDIO bus (id: %d) registration failed\n", - __func__, priv->plat->bus_id); - goto error_mdio_register; - } + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + dev_err_probe(priv->device, ret, + "MDIO bus (id: %d) registration failed\n", + priv->plat->bus_id); + goto error_mdio_register; } if (priv->plat->speed_mode_2500) priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); - if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { - ret = stmmac_xpcs_setup(priv->mii); - if (ret) - goto error_xpcs_setup; - } + ret = stmmac_pcs_setup(ndev); + if (ret) + goto error_pcs_setup; ret = stmmac_phy_setup(priv); if (ret) { @@ -7739,11 +7710,10 @@ int stmmac_dvr_probe(struct device *device, error_netdev_register: phylink_destroy(priv->phylink); -error_xpcs_setup: error_phy_setup: - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); + stmmac_pcs_clean(ndev); +error_pcs_setup: + stmmac_mdio_unregister(ndev); error_mdio_register: stmmac_napi_del(ndev); error_hw_init: @@ -7770,9 +7740,6 @@ void stmmac_dvr_remove(struct device *dev) pm_runtime_get_sync(dev); - stmmac_stop_all_dma(priv); - stmmac_mac_set(priv, priv->ioaddr, false); - netif_carrier_off(ndev); unregister_netdev(ndev); #ifdef CONFIG_DEBUG_FS @@ -7782,9 +7749,10 @@ void stmmac_dvr_remove(struct device *dev) if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); reset_control_assert(priv->plat->stmmac_ahb_rst); - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); + + stmmac_pcs_clean(ndev); + stmmac_mdio_unregister(ndev); + destroy_workqueue(priv->wq); mutex_destroy(&priv->lock); bitmap_free(priv->af_xdp_zc_qps); @@ -7819,7 +7787,7 @@ int stmmac_suspend(struct device *dev) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); - if (priv->eee_enabled) { + if (priv->eee_sw_timer_en) { priv->tx_path_in_lpi_mode = false; del_timer_sync(&priv->eee_ctrl_timer); } @@ -7842,25 +7810,15 @@ int stmmac_suspend(struct device *dev) mutex_unlock(&priv->lock); rtnl_lock(); - if (device_may_wakeup(priv->device) && priv->plat->pmt) { - phylink_suspend(priv->phylink, true); - } else { - if (device_may_wakeup(priv->device)) - phylink_speed_down(priv->phylink, false); - phylink_suspend(priv->phylink, false); - } - rtnl_unlock(); + if (device_may_wakeup(priv->device) && !priv->plat->pmt) + phylink_speed_down(priv->phylink, false); - if (priv->dma_cap.fpesel) { - /* Disable FPE */ - stmmac_fpe_configure(priv, priv->ioaddr, - priv->plat->fpe_cfg, - priv->plat->tx_queues_to_use, - priv->plat->rx_queues_to_use, false); + phylink_suspend(priv->phylink, + device_may_wakeup(priv->device) && priv->plat->pmt); + rtnl_unlock(); - stmmac_fpe_handshake(priv, false); - stmmac_fpe_stop_wq(priv); - } + if (stmmac_fpe_supported(priv)) + timer_shutdown_sync(&priv->fpe_cfg.verify_timer); priv->speed = SPEED_UNKNOWN; return 0; @@ -7946,16 +7904,8 @@ int stmmac_resume(struct device *dev) } rtnl_lock(); - if (device_may_wakeup(priv->device) && priv->plat->pmt) { - phylink_resume(priv->phylink); - } else { - 
phylink_resume(priv->phylink); - if (device_may_wakeup(priv->device)) - phylink_speed_up(priv->phylink); - } - rtnl_unlock(); + phylink_prepare_resume(priv->phylink); - rtnl_lock(); mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); @@ -7973,6 +7923,11 @@ stmmac_enable_all_dma_irq(priv); mutex_unlock(&priv->lock); + + phylink_resume(priv->phylink); + if (device_may_wakeup(priv->device) && !priv->plat->pmt) + phylink_speed_up(priv->phylink); + rtnl_unlock(); netif_device_attach(ndev);
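A closing note on the retired flow_ctrl parameter from the top of this diff: initialising it to the 0xdead sentinel lets stmmac_verify_args() warn only when a user actually set the obsolete option, while the real flow-control decision now comes from the negotiated pause state in stmmac_mac_link_up(). A generic, hedged sketch of the same deprecation pattern (driver and parameter names are purely illustrative):

#include <linux/module.h>

#define MYDRV_PARAM_UNSET 0xdead

static int legacy_knob = MYDRV_PARAM_UNSET;
module_param(legacy_knob, int, 0644);
MODULE_PARM_DESC(legacy_knob, "Legacy knob (obsolete)");

static void mydrv_check_legacy_params(void)
{
	/* Warn only if the user changed the sentinel default. */
	if (legacy_knob != MYDRV_PARAM_UNSET)
		pr_warn("mydrv: module parameter 'legacy_knob' is obsolete - please remove it from your configuration\n");
}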