Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2468 ++++++++++---------
 1 file changed, 1409 insertions(+), 1059 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9a3182b9e767..da206b24aaed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
@@ -39,11 +40,14 @@
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
+#include <net/devlink.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
+#include "stmmac_fpe.h"
#include "stmmac.h"
+#include "stmmac_pcs.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
@@ -55,8 +59,7 @@
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
* (clock jumps) when changing timestamping settings at runtime.
*/
-#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
- PTP_TCR_TSCTRLSSR)
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -76,7 +79,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -88,33 +90,32 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* buf_sz is retained for backwards compatibility but is no longer used */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
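Note the unit chain here, sketched below: the eee_timer module parameter is in milliseconds, the driver's tx_lpi_timer (and phylink's lpi_timer_default, set later in this patch as eee_timer * 1000) is in microseconds, and STMMAC_LPI_T() converts microseconds to jiffies. The local variables are illustrative, not from the driver:

	/* Illustrative only: eee_timer (ms) -> tx_lpi_timer (us) -> jiffies */
	unsigned int tx_lpi_timer_us = eee_timer * 1000;
	unsigned long expires = STMMAC_LPI_T(tx_lpi_timer_us);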
@@ -147,37 +148,84 @@ static void stmmac_exit_fs(struct net_device *dev);
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+struct stmmac_devlink_priv {
+ struct stmmac_priv *stmmac_priv;
+};
+
+enum stmmac_dl_param_id {
+ STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
+};
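For context, a hedged sketch of how the driver-specific parameter ID above could be exposed through the devlink params API. The callback and array names are illustrative, the parameter is shown read-only for brevity, and the actual registration is outside this hunk; only stmmac_devlink_priv and the tsfupdt_coarse field (used later in this patch) are taken from the source:

	static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
					   struct devlink_param_gset_ctx *ctx)
	{
		struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);

		/* Report whether coarse timestamp updating is active */
		ctx->val.vbool = dl_priv->stmmac_priv->tsfupdt_coarse;
		return 0;
	}

	static const struct devlink_param stmmac_devlink_params[] = {
		DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
				     "ts_coarse", DEVLINK_PARAM_TYPE_BOOL,
				     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
				     stmmac_dl_ts_coarse_get, NULL, NULL),
	};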
+
+/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to be used only via this method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- int ret = 0;
+ long rate = rgmii_clock(speed);
- if (enabled) {
- ret = clk_prepare_enable(priv->plat->stmmac_clk);
- if (ret)
- return ret;
- ret = clk_prepare_enable(priv->plat->pclk);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- return ret;
- }
- if (priv->plat->clks_config) {
- ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- return ret;
- }
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to spit
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
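A minimal sketch of the first hook-up option named in the kernel-doc above; "dwmac_foo" is a made-up platform, and only the two assignments follow from this patch:

	/* Hypothetical platform glue: reuse the generic helper directly */
	static void dwmac_foo_setup_tx_clk(struct plat_stmmacenet_data *plat_dat,
					   struct clk *tx_clk)
	{
		plat_dat->clk_tx_i = tx_clk;	/* must be filled in, per the kernel-doc */
		plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
	}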
+
+/**
+ * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
+ * @regval: pointer to a u32 for the resulting register value
+ * @blen: pointer to an array of u32 containing the burst length values in bytes
+ * @len: the number of entries in the @blen array
+ */
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
+{
+ size_t i;
+ u32 val;
+
+ for (val = i = 0; i < len; i++) {
+ u32 burst = blen[i];
+
+ /* Burst values of zero must be skipped. */
+ if (!burst)
+ continue;
+
+ /* The valid range for the burst length is 4 to 256 inclusive,
+ * and it must be a power of two.
+ */
+ if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
+ pr_err("stmmac: invalid burst length %u at index %zu\n",
+ burst, i);
+ continue;
}
- } else {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- if (priv->plat->clks_config)
- priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+
+ /* Since burst is a power of two, and the register field starts
+ * with burst = 4, shift right by two bits so bit 0 of the field
+ * corresponds with the minimum value.
+ */
+ val |= burst >> 2;
}
- return ret;
+ *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
}
-EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
+EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
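A worked example of the mapping (values are illustrative): bursts of 4, 8 and 16 set bits 0, 1 and 2 of the field respectively.

	static const u32 example_blen[] = { 4, 8, 16 };
	u32 regval;

	/* val = (4 >> 2) | (8 >> 2) | (16 >> 2) = 0x1 | 0x2 | 0x4 = 0x7,
	 * which FIELD_PREP() then positions within DMA_AXI_BLEN_MASK.
	 */
	stmmac_axi_blen_to_mask(&regval, example_blen, ARRAY_SIZE(example_blen));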
/**
* stmmac_verify_args - verify the driver parameters.
@@ -188,16 +236,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -286,73 +329,6 @@ static void stmmac_global_err(struct stmmac_priv *priv)
stmmac_service_event_schedule(priv);
}
-/**
- * stmmac_clk_csr_set - dynamically set the MDC clock
- * @priv: driver private structure
- * Description: this is to dynamically set the MDC clock according to the csr
- * clock input.
- * Note:
- * If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
- * clock dynamically according to the actual clock input.
- */
-static void stmmac_clk_csr_set(struct stmmac_priv *priv)
-{
- u32 clk_rate;
-
- clk_rate = clk_get_rate(priv->plat->stmmac_clk);
-
- /* Platform provided default clk_csr would be assumed valid
- * for all other cases except for the below mentioned ones.
- * For values higher than the IEEE 802.3 specified frequency
- * we can not estimate the proper divider as it is not known
- * the frequency of clk_csr_i. So we do not change the default
- * divider.
- */
- if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
- if (clk_rate < CSR_F_35M)
- priv->clk_csr = STMMAC_CSR_20_35M;
- else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
- priv->clk_csr = STMMAC_CSR_35_60M;
- else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
- priv->clk_csr = STMMAC_CSR_60_100M;
- else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
- priv->clk_csr = STMMAC_CSR_100_150M;
- else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
- priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
- priv->clk_csr = STMMAC_CSR_250_300M;
- }
-
- if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
- if (clk_rate > 160000000)
- priv->clk_csr = 0x03;
- else if (clk_rate > 80000000)
- priv->clk_csr = 0x02;
- else if (clk_rate > 40000000)
- priv->clk_csr = 0x01;
- else
- priv->clk_csr = 0;
- }
-
- if (priv->plat->has_xgmac) {
- if (clk_rate > 400000000)
- priv->clk_csr = 0x5;
- else if (clk_rate > 350000000)
- priv->clk_csr = 0x4;
- else if (clk_rate > 300000000)
- priv->clk_csr = 0x3;
- else if (clk_rate > 250000000)
- priv->clk_csr = 0x2;
- else if (clk_rate > 150000000)
- priv->clk_csr = 0x1;
- else
- priv->clk_csr = 0x0;
- }
-}
-
static void print_pkt(unsigned char *buf, int len)
{
pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
@@ -390,23 +366,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
-{
- int tx_lpi_timer;
-
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
-}
-
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -416,31 +376,45 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
}
- /* Check and enter in LPI mode */
- if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_try_to_start_sw_lpi - check and enter LPI mode
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: check whether the TX path is idle and, if so, enter
+ * LPI mode for EEE.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
return;
}
- stmmac_reset_eee_mode(priv, priv->hw);
- del_timer_sync(&priv->eee_ctrl_timer);
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
+}
+
+/**
+ * stmmac_stop_sw_lpi - stop transmitting LPI
+ * @priv: driver private structure
+ * Description: When using software-controlled LPI, stop transmitting LPI state.
+ */
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
+{
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -453,76 +427,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
*/
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
-
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
-}
-
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-bool stmmac_eee_init(struct stmmac_priv *priv)
-{
- int eee_tw_timer = priv->eee_tw_timer;
-
- /* Using PCS we cannot dial with the phy registers at this stage
- * so we do not support extra feature like EEE.
- */
- if (priv->hw->pcs == STMMAC_PCS_TBI ||
- priv->hw->pcs == STMMAC_PCS_RTBI)
- return false;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- mutex_unlock(&priv->lock);
- return false;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
-
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
- } else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
+ stmmac_try_to_start_sw_lpi(priv);
}
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -586,7 +493,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc = np;
/* Check if timestamp is available */
@@ -604,21 +511,49 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
}
}
+static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
+{
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
+ u32 sec_inc = 0;
+ u64 temp = 0;
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = 1e9ns/sec_inc
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+}
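To make the addend formula concrete, assume clk_ptp_rate = 125 MHz and a hardware-selected sec_inc of 16 ns (assumed values, not from this patch): temp = 10^9 / 16 = 62500000, so addend = (62500000 << 32) / 125000000 = 2^31 = 0x80000000. The 32-bit accumulator then wraps every second PTP clock cycle, advancing the system time by sec_inc = 16 ns per wrap, i.e. an effective 8 ns per 125 MHz cycle.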
+
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
+ * @extack: netlink extended ack structure for error reporting.
* Description:
* This function configures the MAC to enable/disable both outgoing(TX)
* and incoming(RX) packets time stamping based on user input.
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -629,34 +564,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
- netdev_alert(priv->dev, "No support for HW time stamping\n");
+ NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
return -EOPNOTSUPP;
}
- if (copy_from_user(&config, ifr->ifr_data,
- sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change timestamping configuration while down");
+ return -ENODEV;
+ }
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ __func__, config->flags, config->tx_type, config->rx_filter);
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
if (priv->adv_ts) {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* time stamp no incoming packet at all */
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* 'xmac' hardware can support Sync, Pdelay_Req and
* Pdelay_resp by setting bit14 and bits17/16 to 01
* This leaves Delay_Req timestamps out.
@@ -670,7 +607,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
/* PTP v1, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -680,7 +617,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
/* PTP v1, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -691,7 +628,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
/* PTP v2, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -702,7 +639,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* PTP v2, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -713,7 +650,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* PTP v2, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -725,7 +662,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* PTP v2/802.AS1 any layer, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id < DWMAC_CORE_4_10)
@@ -737,7 +674,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* PTP v2/802.AS1, any layer, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -749,7 +686,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* PTP v2/802.AS1, any layer, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -763,7 +700,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_all = PTP_TCR_TSENALL;
break;
@@ -771,20 +708,22 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
} else {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
}
}
- priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
- priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+ priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
+ priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
+ if (!priv->tsfupdt_coarse)
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
if (priv->hwts_tx_en || priv->hwts_rx_en) {
priv->systime_flags |= tstamp_all | ptp_v2 |
@@ -795,31 +734,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
- memcpy(&priv->tstamp_config, &config, sizeof(config));
+ priv->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
/**
* stmmac_hwtstamp_get - read hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
* Description:
 * This function obtains the current hardware timestamping settings
* as requested.
*/
-static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config *config = &priv->tstamp_config;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = priv->tstamp_config;
+
+ return 0;
}
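The new prototypes match the kernel_hwtstamp_config-based net_device_ops callbacks; a hedged sketch of the hookup, which is not part of this hunk (the struct name and member subset are illustrative):

	static const struct net_device_ops stmmac_netdev_ops_sketch = {
		/* Illustrative subset; the real ops table has many more members */
		.ndo_hwtstamp_get = stmmac_hwtstamp_get,
		.ndo_hwtstamp_set = stmmac_hwtstamp_set,
	};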
/**
@@ -832,36 +770,20 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
* Will be rerun after resuming from suspend, case in which the timestamping
* flags updated by stmmac_hwtstamp_set() also need to be restored.
*/
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
+ u32 systime_flags)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
struct timespec64 now;
- u32 sec_inc = 0;
- u64 temp = 0;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
+ if (!priv->plat->clk_ptp_rate) {
+ netdev_err(priv->dev, "Invalid PTP clock rate\n");
+ return -EINVAL;
+ }
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
- /* program Sub Second Increment reg */
- stmmac_config_sub_second_increment(priv, priv->ptpaddr,
- priv->plat->clk_ptp_rate,
- xmac, &sec_inc);
- temp = div_u64(1000000000ULL, sec_inc);
-
- /* Store sub second increment for later use */
- priv->sub_second_inc = sec_inc;
-
- /* calculate default added value:
- * formula is :
- * addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = 1e9ns/sec_inc
- */
- temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+ stmmac_update_subsecond_increment(priv);
/* initialize system time */
ktime_get_real_ts64(&now);
@@ -871,26 +793,33 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
return 0;
}
-EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
/**
- * stmmac_init_ptp - init PTP
+ * stmmac_init_timestamping - initialise timestamping
* @priv: driver private structure
 * Description: verify whether the HW supports PTPv1 or PTPv2 by looking
 * at the HW capability register, then initialise the hardware
 * timestamping counter.
*/
-static int stmmac_init_ptp(struct stmmac_priv *priv)
+static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
int ret;
if (priv->plat->ptp_clk_freq_config)
priv->plat->ptp_clk_freq_config(priv);
- ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
- if (ret)
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
+ PTP_TCR_TSCFUPDT);
+ if (ret) {
+ netdev_warn(priv->dev, "PTP init failed\n");
return ret;
+ }
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -916,36 +845,76 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_setup_ptp(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+
+ if (stmmac_init_timestamping(priv) == 0)
+ stmmac_ptp_register(priv);
+}
+
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
+}
+
+static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ /* Refresh the MAC-specific capabilities */
+ stmmac_mac_update_caps(priv);
+
+ config->mac_capabilities = priv->hw->link.caps;
+
+ if (priv->plat->max_speed)
+ phylink_limit_mac_speed(config, priv->plat->max_speed);
+
+ return config->mac_capabilities;
}
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ struct phylink_pcs *pcs;
- if (priv->hw->xpcs)
- return &priv->hw->xpcs->pcs;
+ if (priv->plat->select_pcs) {
+ pcs = priv->plat->select_pcs(priv, interface);
+ if (!IS_ERR(pcs))
+ return pcs;
+ }
- if (priv->hw->lynx_pcs)
- return priv->hw->lynx_pcs;
+ /* The PCS control register is only relevant for SGMII, TBI and RTBI
+ * modes. We no longer support TBI or RTBI, so only configure this
+ * register when operating in SGMII mode with the integrated PCS.
+ */
+ if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
+ return &priv->integrated_pcs->pcs;
return NULL;
}
@@ -956,19 +925,16 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
/* Nothing to do, xpcs_config() handles everything */
}
-static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
- if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
- } else {
- *lo_state = FPE_STATE_OFF;
- *lp_state = FPE_STATE_OFF;
- }
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
+
+ return 0;
}
static void stmmac_mac_link_down(struct phylink_config *config,
@@ -977,13 +943,11 @@ static void stmmac_mac_link_down(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
+ if (priv->dma_cap.eee)
+ stmmac_set_eee_pls(priv, priv->hw, false);
- if (priv->dma_cap.fpesel)
- stmmac_fpe_link_state_handle(priv, false);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -993,7 +957,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1061,8 +1027,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1073,41 +1037,124 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure %s transmit clock for %dMbps: %pe\n",
+ phy_modes(interface), speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
- if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
- }
- if (priv->dma_cap.fpesel)
- stmmac_fpe_link_state_handle(priv, true);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
stmmac_hwtstamp_correct_latency(priv, priv);
}
+static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ timer_delete_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
+}
+
+static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
+
+ priv->tx_lpi_timer = timer;
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+ /* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
+
+static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ device_set_wakeup_enable(priv->device, !!wolopts);
+
+ mutex_lock(&priv->lock);
+ priv->wolopts = wolopts;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
+ .mac_finish = stmmac_mac_finish,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
+ .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_wol_set = stmmac_mac_wol_set,
};
/**
@@ -1119,18 +1166,26 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->mac_interface;
-
- if (priv->dma_cap.pcs) {
- if ((interface == PHY_INTERFACE_MODE_RGMII) ||
- (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_RGMII;
- } else if (interface == PHY_INTERFACE_MODE_SGMII) {
- netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_SGMII;
+ int interface = priv->plat->phy_interface;
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
+
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ priv->hw->reverse_sgmii_enable = true;
+ break;
+
+ default:
+ dev_warn(priv->device, "invalid port speed\n");
+ fallthrough;
+ case 0:
+ priv->hw->reverse_sgmii_enable = false;
+ break;
}
}
}
@@ -1146,13 +1201,19 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
+ struct ethtool_keee eee;
int ret;
if (!phylink_expects_phy(priv->phylink))
return 0;
+ if (priv->hw->xpcs &&
+ xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
+ return 0;
+
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1186,66 +1247,110 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
- if (!priv->plat->pmt) {
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ if (ret) {
+ netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
- device_set_wakeup_enable(priv->device, !!wol.wolopts);
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is a
+ * chance that userspace could change the eee_timer setting via sysfs
+ * before the first open. Thus, preserve existing behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
}
- return ret;
+ return 0;
}
-static int stmmac_phy_setup(struct stmmac_priv *priv)
+static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
- int mode = priv->plat->phy_interface;
+ struct phylink_config *config;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
- int max_speed;
- priv->phylink_config.dev = &priv->dev->dev;
- priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.mac_managed_pm = true;
+ config = &priv->phylink_config;
+
+ config->dev = &priv->dev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_managed_pm = true;
+
+ /* Stmmac always requires an RX clock for hardware initialization */
+ config->mac_requires_rxc = true;
+
+ /* Disable EEE RX clock stop to ensure VLAN register access works
+ * correctly.
+ */
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
+ !(priv->dev->features & NETIF_F_VLAN_FEATURES))
+ config->eee_rx_clk_stop_enable = true;
+
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.ovr_an_inband =
- mdio_bus_data->xpcs_an_inband;
+ config->default_an_inband = mdio_bus_data->default_an_inband;
- /* Set the platform/firmware specified interface mode. Note, phylink
- * deals with the PHY interface mode, not the MAC interface mode.
+ /* Get the PHY interface modes (at the PHY end of the link) that
+ * are supported by the platform.
*/
- __set_bit(mode, priv->phylink_config.supported_interfaces);
+ if (priv->plat->get_interfaces)
+ priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
+ config->supported_interfaces);
+
+ /* If the supported interfaces have not already been provided, fall
+ * back to the platform/firmware specified interface mode from
+ * phy_interface as a last resort.
+ */
+ if (phy_interface_empty(config->supported_interfaces))
+ __set_bit(priv->plat->phy_interface,
+ config->supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10FD | MAC_100FD |
- MAC_1000FD;
+ if (pcs)
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ pcs->supported_interfaces);
- /* Half-Duplex can only work with single queue */
- if (priv->plat->tx_queues_to_use <= 1)
- priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
- MAC_1000HD;
+ if (priv->dma_cap.eee) {
+ /* Assume all supported interfaces also support LPI */
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
- /* Get the MAC specific capabilities */
- stmmac_mac_phylink_get_caps(priv);
+ /* All full duplex speeds of 100Mbps and above are supported */
+ config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
+ config->lpi_timer_default = eee_timer * 1000;
+ config->eee_enabled_default = true;
+ }
- max_speed = priv->plat->max_speed;
- if (max_speed)
- phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+ config->wol_phy_speed_ctrl = true;
+ if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
+ config->wol_phy_legacy = true;
+ } else {
+ if (priv->dma_cap.pmt_remote_wake_up)
+ config->wol_mac_support |= WAKE_UCAST;
+ if (priv->dma_cap.pmt_magic_frame)
+ config->wol_mac_support |= WAKE_MAGIC;
+ }
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
- phylink = phylink_create(&priv->phylink_config, fwnode,
- mode, &stmmac_phylink_mac_ops);
+ phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
+ &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
@@ -1321,9 +1426,17 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
-static int stmmac_set_bfsize(int mtu, int bufsize)
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
{
- int ret = bufsize;
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
+static int stmmac_set_bfsize(int mtu)
+{
+ int ret;
if (mtu >= BUF_SIZE_8KiB)
ret = BUF_SIZE_16KiB;
@@ -1452,7 +1565,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
buf->page_offset = stmmac_rx_offset(priv);
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -2017,22 +2130,31 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
+
+ if (priv->sph_active) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
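A rough sizing walk-through under assumed values (not from the patch): with dma_buf_sz = 1536, NET_SKB_PAD = 64 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320 on a 4 KiB-page system, dma_buf_sz_pad = 64 + 1536 + 320 = 1920, so num_pages = DIV_ROUND_UP(1920, 4096) = 1 and pp_params.order = order_base_2(1) = 0, i.e. each RX buffer is backed by a single 4 KiB page.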
@@ -2369,9 +2491,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
- /* Adjust for real per queue fifo size */
- rxfifosz /= rx_channels_count;
- txfifosz /= tx_channels_count;
+ /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
+ if (dwmac_is_xmac(priv->plat->core_type)) {
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+ }
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -2422,24 +2546,78 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
+static void stmmac_xsk_request_timestamp(void *_priv)
+{
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
+ *meta_req->set_ic = true;
+}
+
+static u64 stmmac_xsk_fill_timestamp(void *_priv)
+{
+ struct stmmac_xsk_tx_complete *tx_compl = _priv;
+ struct stmmac_priv *priv = tx_compl->priv;
+ struct dma_desc *desc = tx_compl->desc;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return 0;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, desc)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+ return ns_to_ktime(ns);
+ }
+
+ return 0;
+}
+
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
+static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = stmmac_xsk_request_timestamp,
+ .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
+};
+
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
struct xdp_desc xdp_desc;
bool work_done = true;
u32 tx_set_ic_bit = 0;
- unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
budget = min(budget, stmmac_tx_avail(priv, queue));
- while (budget-- > 0) {
+ for (; budget > 0; budget--) {
+ struct stmmac_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
bool set_ic;
@@ -2455,6 +2633,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdp_desc.len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ continue;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -2463,6 +2648,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_desc = tx_q->dma_tx + entry;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
@@ -2490,6 +2676,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
else
set_ic = false;
+ meta_req.priv = priv;
+ meta_req.tx_desc = tx_desc;
+ meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
+ xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
+ &meta_req);
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
@@ -2497,17 +2690,20 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdp_desc.len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+
+ xsk_tx_metadata_to_compl(meta,
+ &tx_q->tx_skbuff_dma[entry].xsk_meta);
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->napi_syncp);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2542,15 +2738,19 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
* @priv: driver private structure
* @budget: napi budget limiting this functions packet handling
* @queue: TX queue index
+ * @pending_packets: signal to arm the TX coal timer
* Description: it reclaims the transmit resources after transmission completes.
+ * If some packets still need to be handled due to TX coalescing, set
+ * pending_packets to true to make NAPI arm the TX coal timer.
*/
-static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
+ bool *pending_packets)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
- unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2606,8 +2806,19 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
} else {
tx_packets++;
}
- if (skb)
+ if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
+ struct stmmac_xsk_tx_complete tx_compl = {
+ .priv = priv,
+ .desc = p,
+ };
+
+ xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
+ &stmmac_xsk_tx_metadata_ops,
+ &tx_compl);
+ }
}
if (likely(tx_q->tx_skbuff_dma[entry].buf &&
@@ -2696,23 +2907,18 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- HRTIMER_MODE_REL);
+ *pending_packets = true;
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_packets += tx_packets;
- tx_q->txq_stats.tx_pkt_n += tx_packets;
- tx_q->txq_stats.tx_clean++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ u64_stats_inc(&txq_stats->napi.tx_clean);
+ u64_stats_update_end(&txq_stats->napi_syncp);
priv->xstats.tx_errors += tx_errors;
@@ -2843,7 +3049,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+ int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
@@ -2918,6 +3124,56 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
}
}
+int stmmac_get_phy_intf_sel(phy_interface_t interface)
+{
+ int phy_intf_sel = -EINVAL;
+
+ if (interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_GMII)
+ phy_intf_sel = PHY_INTF_SEL_GMII_MII;
+ else if (phy_interface_mode_is_rgmii(interface))
+ phy_intf_sel = PHY_INTF_SEL_RGMII;
+ else if (interface == PHY_INTERFACE_MODE_SGMII)
+ phy_intf_sel = PHY_INTF_SEL_SGMII;
+ else if (interface == PHY_INTERFACE_MODE_RMII)
+ phy_intf_sel = PHY_INTF_SEL_RMII;
+ else if (interface == PHY_INTERFACE_MODE_REVMII)
+ phy_intf_sel = PHY_INTF_SEL_REVMII;
+
+ return phy_intf_sel;
+}
+EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
+
+static int stmmac_prereset_configure(struct stmmac_priv *priv)
+{
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
+ phy_interface_t interface;
+ int phy_intf_sel, ret;
+
+ if (!plat_dat->set_phy_intf_sel)
+ return 0;
+
+ interface = plat_dat->phy_interface;
+ phy_intf_sel = stmmac_get_phy_intf_sel(interface);
+ if (phy_intf_sel < 0) {
+ netdev_err(priv->dev,
+ "failed to get phy_intf_sel for %s: %pe\n",
+ phy_modes(interface), ERR_PTR(phy_intf_sel));
+ return phy_intf_sel;
+ }
+
+ ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
+ if (ret == -EINVAL)
+ netdev_err(priv->dev, "platform does not support %s\n",
+ phy_modes(interface));
+ else if (ret < 0)
+ netdev_err(priv->dev,
+ "platform failed to set interface %s: %pe\n",
+ phy_modes(interface), ERR_PTR(ret));
+
+ return ret;
+}
+
/**
* stmmac_init_dma_engine - DMA init.
* @priv: driver private structure
@@ -2934,25 +3190,28 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
u32 chan = 0;
- int atds = 0;
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
- atds = 1;
+ priv->plat->dma_cfg->atds = 1;
+
+ ret = stmmac_prereset_configure(priv);
+ if (ret)
+ return ret;
- ret = stmmac_reset(priv, priv->ioaddr);
+ ret = stmmac_reset(priv);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
/* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -2995,10 +3254,26 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 tx_coal_timer = priv->tx_coal_timer[queue];
+ struct stmmac_channel *ch;
+ struct napi_struct *napi;
+
+ if (!tx_coal_timer)
+ return;
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- HRTIMER_MODE_REL);
+ ch = &priv->channel[tx_q->queue_index];
+ napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ /* Arm the timer only if napi is not already scheduled. If napi is
+ * scheduled, try to cancel any pending timer instead; it will be
+ * armed again on the next napi run.
+ */
+ if (unlikely(!napi_is_scheduled(napi)))
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(tx_coal_timer),
+ HRTIMER_MODE_REL);
+ else
+ hrtimer_try_to_cancel(&tx_q->txtimer);
}
/**
@@ -3049,8 +3324,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3272,31 +3546,9 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
- char *name;
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
- clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- name = priv->wq_name;
- sprintf(name, "%s-fpe", priv->dev->name);
-
- priv->fpe_wq = create_singlethread_workqueue(name);
- if (!priv->fpe_wq) {
- netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
- return -ENOMEM;
- }
- netdev_info(priv->dev, "FPE workqueue start");
-
- return 0;
-}
-
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3306,7 +3558,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+static int stmmac_hw_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3315,9 +3567,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
u32 chan;
int ret;
+ /* Make sure RX clock is enabled */
+ if (priv->hw->phylink_pcs)
+ phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3325,19 +3590,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
-
- /* PS and related bits will be programmed according to the speed */
- if (priv->hw->pcs) {
- int speed = priv->plat->mac_port_sel_speed;
-
- if ((speed == SPEED_10) || (speed == SPEED_100) ||
- (speed == SPEED_1000)) {
- priv->hw->ps = speed;
- } else {
- dev_warn(priv->device, "invalid port speed\n");
- priv->hw->ps = 0;
- }
- }
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* Initialize the MAC Core */
stmmac_core_init(priv, priv->hw, dev);
@@ -3363,28 +3616,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
- if (ptp_register) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- }
-
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_info(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- else if (ptp_register)
- stmmac_ptp_register(priv);
-
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3397,9 +3628,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
}
- if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
-
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@ -3417,7 +3645,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
/* Enable Split Header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
for (chan = 0; chan < rx_cnt; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
@@ -3441,23 +3669,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
- if (priv->dma_cap.fpesel) {
- stmmac_fpe_start_wq(priv);
-
- if (priv->plat->fpe_cfg->enable)
- stmmac_fpe_handshake(priv, true);
- }
+ phylink_rx_clk_stop_block(priv->phylink);
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
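The phylink_rx_clk_stop_block()/_unblock() pairs added above all follow one pattern: any access that needs clk_rx_i running (DMA reset completion, MAC address writes, VLAN mode) is bracketed so phylink stops the PHY from gating the receive clock for EEE/LPI in the meantime. A hypothetical wrapper showing the shape of the pattern (these calls may sleep, which is why the FIXME notes further down flag paths that run with BH disabled):

static void stmmac_write_umac_addr_rx_clk_safe(struct stmmac_priv *priv,
					       const u8 *addr)
{
	/* Illustration only, not a helper from this patch */
	phylink_rx_clk_stop_block(priv->phylink);	/* may sleep */
	stmmac_set_umac_addr(priv, priv->hw, addr, 0);
	phylink_rx_clk_stop_unblock(priv->phylink);
}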
-static void stmmac_hw_teardown(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
-}
-
static void stmmac_free_irq(struct net_device *dev,
enum request_irq_err irq_err, int irq_idx)
{
@@ -3500,6 +3718,10 @@ static void stmmac_free_irq(struct net_device *dev,
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
+ case REQ_IRQ_ERR_SFTY:
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
+ free_irq(priv->sfty_irq, dev);
+ fallthrough;
case REQ_IRQ_ERR_WOL:
free_irq(dev->irq, dev);
fallthrough;
@@ -3514,7 +3736,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3569,6 +3790,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctable/Uncorrectable
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ int_name = priv->int_name_sfty;
+ sprintf(int_name, "%s:%s", dev->name, "safety");
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty MSI %d (error: %d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
/* Request the Safety Feature Correctable Error line in
* case another line is used
*/
@@ -3625,9 +3863,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3650,9 +3887,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
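Switching to cpumask_of() here is more than a cleanup: irq_set_affinity_hint() retains the mask pointer (it is re-read later for /proc/irq/*/affinity_hint), so handing it an on-stack cpumask_t left a stale hint once this function returned, whereas cpumask_of() points at constant masks with static lifetime. The two forms side by side:

	/* old: pointer to a stack variable is retained after return */
	cpumask_t cpu_mask;

	cpumask_clear(&cpu_mask);
	cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
	irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);

	/* new: constant single-CPU mask, valid forever */
	irq_set_affinity_hint(priv->rx_irq[i],
			      cpumask_of(i % num_online_cpus()));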
@@ -3706,6 +3942,21 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctable/Uncorrectable
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
return 0;
irq_error:
@@ -3749,12 +4000,13 @@ stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
return ERR_PTR(-ENOMEM);
}
+ /* Returns 0, or BUF_SIZE_16KiB when mtu > 8KiB on dwmac4 or in ring mode */
bfsize = stmmac_set_16kib_bfsize(priv, mtu);
if (bfsize < 0)
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(mtu, 0);
+ bfsize = stmmac_set_bfsize(mtu);
dma_conf->dma_buf_sz = bfsize;
/* Chose the tx/rx size from the already defined one in the
@@ -3814,31 +4066,12 @@ static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int mode = priv->plat->phy_interface;
u32 chan;
int ret;
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI &&
- (!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
- !priv->hw->lynx_pcs) {
- ret = stmmac_init_phy(dev);
- if (ret) {
- netdev_err(priv->dev,
- "%s: Cannot attach to PHY (error: %d)\n",
- __func__, ret);
- goto init_phy_error;
- }
- }
-
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
- buf_sz = dma_conf->dma_buf_sz;
+ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
stmmac_reset_queues_param(priv);
@@ -3853,17 +4086,17 @@ static int __stmmac_open(struct net_device *dev,
}
}
- ret = stmmac_hw_setup(dev, true);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_setup_ptp(priv);
+
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
ret = stmmac_request_irq(dev);
if (ret)
@@ -3881,11 +4114,8 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
+ stmmac_release_ptp(priv);
init_error:
- phylink_disconnect_phy(priv->phylink);
-init_phy_error:
- pm_runtime_put(priv->device);
return ret;
}
@@ -3895,44 +4125,50 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_dma_conf *dma_conf;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ goto err_dma_resources;
+
+ ret = stmmac_init_phy(dev);
+ if (ret)
+ goto err_runtime_pm;
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- free_dma_desc_resources(priv, dma_conf);
+ goto err_disconnect_phy;
kfree(dma_conf);
- return ret;
-}
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
- if (priv->fpe_wq)
- destroy_workqueue(priv->fpe_wq);
+ return ret;
- netdev_info(priv->dev, "FPE workqueue stop");
+err_disconnect_phy:
+ phylink_disconnect_phy(priv->phylink);
+err_runtime_pm:
+ pm_runtime_put(priv->device);
+err_dma_resources:
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
+ return ret;
}
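Note that the unwind ladder above releases resources in reverse order of acquisition: the PHY is disconnected before the runtime-PM reference is dropped, and the DMA descriptor resources, allocated before either, are freed last, so each label undoes exactly the steps that had succeeded.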
-/**
- * stmmac_release - close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver.
- */
-static int stmmac_release(struct net_device *dev)
+static void __stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
- phylink_disconnect_phy(priv->phylink);
stmmac_disable_all_queues(priv);
@@ -3944,32 +4180,43 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
- netif_carrier_off(dev);
-
stmmac_release_ptp(priv);
- pm_runtime_put(priv->device);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+}
- if (priv->dma_cap.fpesel)
- stmmac_fpe_stop_wq(priv);
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* If the PHY or MAC has WoL enabled, then the PHY will not be
+ * suspended when phylink_stop() is called below. Set the PHY
+ * to its slowest speed to save power.
+ */
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+
+ __stmmac_release(dev);
+
+ phylink_disconnect_phy(priv->phylink);
+ pm_runtime_put(priv->device);
return 0;
}
@@ -3977,18 +4224,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
- return false;
- if (!skb_vlan_tag_present(skb))
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@ -3997,7 +4237,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@ -4039,11 +4279,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4089,17 +4325,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for the TCP payload if the DMA AXI
+ * | | address width is 32-bit, but we never use it.
+ * | | It can also carry the most-significant 8 or 16 bits of the
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, in which case we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
@@ -4109,19 +4355,33 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int nfrags = skb_shinfo(skb)->nr_frags;
- u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
- int tmp_pay_len = 0, first_tx;
+ struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ u32 pay_len, mss, queue;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
- unsigned long flags;
- u32 pay_len, mss;
dma_addr_t des;
- int i;
+ bool set_ic;
+
+ /* Always insert the VLAN tag into the SKB payload for TSO frames.
+ *
+ * Never let the HW insert the tag, since segments split by the
+ * TSO engine would be un-tagged by mistake.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ priv->xstats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ queue = skb_get_queue_mapping(skb);
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -4172,9 +4432,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
- /* Check if VLAN can be inserted by HW */
- has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
-
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -4184,37 +4441,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
desc = &tx_q->dma_tx[first_entry];
first = desc;
- if (has_vlan)
- stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
-
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- tx_q->tx_skbuff_dma[first_entry].buf = des;
- tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
- tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
-
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- des += proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
+
+ /* In case two or more DMA transmit descriptors are allocated for this
+ * non-paged SKB data, the DMA buffer address is saved only to
+ * tx_q->tx_skbuff_dma[].buf of the last descriptor, and the entries
+ * for the other descriptors are left NULL. This guarantees that
+ * stmmac_tx_clean() does not unmap the whole DMA buffer too early,
+ * since the tail areas of the buffer may still be accessed by the
+ * DMA engine.
+ * With the address saved on the last descriptor, stmmac_tx_clean()
+ * unmaps the DMA buffer right after the DMA engine has completely
+ * finished transmitting the full buffer.
+ */
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
@@ -4280,19 +4532,17 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
- tx_q->txq_stats.tx_tso_frames++;
- tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
@@ -4301,11 +4551,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4327,6 +4575,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4341,6 +4590,28 @@ dma_map_err:
}
/**
+ * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
+ * @skb: socket buffer to check
+ *
+ * Check if a packet has an ethertype that will trigger the IP header checks
+ * and IP/TCP checksum engine of the stmmac core.
+ *
+ * Return: true if the ethertype can trigger the checksum engine, false
+ * otherwise
+ */
+static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
+{
+ int depth = 0;
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
+ &depth);
+
+ return (depth <= ETH_HLEN) &&
+ (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
+}
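A caller-side sketch of how this helper combines with the checksum paths elsewhere in this patch: on transmit it gates CHECKSUM_PARTIAL, and on receive it stops the driver from trusting a COE result the engine never computed (e.g. for DSA-tagged frames). The drop label is hypothetical:

	/* tx: fall back to software checksumming */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !stmmac_has_ip_ethertype(skb)) {
		if (unlikely(skb_checksum_help(skb)))
			goto drop;
	}

	/* rx: never report CHECKSUM_UNNECESSARY for such frames */
	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
		skb_checksum_none_assert(skb);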
+
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -4350,35 +4621,50 @@ dma_map_err:
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int first_entry, tx_packets, enh_desc;
+ bool enh_desc, has_vlan, set_ic, is_jumbo = false;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
- int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, tx_packets;
int gso = skb_shinfo(skb)->gso_type;
+ struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ int i, csum_insertion = 0;
int entry, first_tx;
- unsigned long flags;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
+ (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
+ }
+
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4399,6 +4685,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
WARN_ON(tx_q->tx_skbuff[first_entry]);
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+ /* DWMAC IPs can be synthesized to support tx coe only for a few tx
+ * queues. In that case, checksum offloading for queues without tx coe
+ * needs to fall back to software checksum calculation.
+ *
+ * Packets that won't trigger the COE, e.g. most DSA-tagged packets,
+ * also have to be checksummed in software.
+ */
+ if (csum_insertion &&
+ (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
+ !stmmac_has_ip_ethertype(skb))) {
+ if (unlikely(skb_checksum_help(skb)))
+ goto dma_map_err;
+ csum_insertion = !csum_insertion;
+ }
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -4517,17 +4817,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
* passed to the DMA engine.
@@ -4573,8 +4871,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
-
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4582,6 +4880,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
dev_kfree_skb(skb);
priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
@@ -4638,7 +4937,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4649,7 +4948,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
+ if (priv->sph_active)
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
else
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
@@ -4674,6 +4973,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
(rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ /* Wake up Rx DMA from the suspend state if required */
+ stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
@@ -4684,12 +4985,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
- if (priv->sph && len)
+ if (priv->sph_active && len)
return 0;
/* First descriptor, get split header length */
stmmac_get_rx_header_len(priv, p, &hlen);
- if (priv->sph && hlen) {
+ if (priv->sph_active && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
}
@@ -4712,7 +5013,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
unsigned int plen = 0;
/* Not split header, buffer is not available */
- if (!priv->sph)
+ if (!priv->sph_active)
return 0;
/* Not last descriptor */
@@ -4728,7 +5029,9 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4737,6 +5040,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdpf->len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ return STMMAC_XDP_CONSUMED;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -4773,7 +5083,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdpf->len);
tx_q->tx_count_frames++;
@@ -4784,15 +5094,14 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
- unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
}
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
@@ -4916,9 +5225,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ch->rxtx_napi,
- xdp->data_end - xdp->data_hard_start,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start);
if (unlikely(!skb))
return NULL;
@@ -4934,12 +5242,11 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
- unsigned long flags;
struct sk_buff *skb;
u32 hash;
@@ -4950,10 +5257,15 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
}
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -4964,10 +5276,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n++;
- rx_q->rxq_stats.rx_bytes += len;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
}
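The stats rework running through this patch splits each queue's counters by writer context (q_syncp for xmit/process context, napi_syncp for napi poll), so each syncp has a single writer and the irqsave dance disappears. The reader side pairs with it as in this minimal sketch (the helper name is made up; stmmac_get_stats64() below uses the same pattern inline):

static u64 stmmac_read_napi_rx_bytes(struct stmmac_rxq_stats *rxq_stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
		bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
	} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

	return bytes;
}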
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5040,6 +5352,7 @@ static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
@@ -5048,7 +5361,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
- unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5084,10 +5396,10 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -5170,7 +5482,7 @@ read_again:
/* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len;
- xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(buf->xdp);
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
@@ -5203,9 +5515,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5233,6 +5545,7 @@ read_again:
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
@@ -5242,12 +5555,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
- unsigned long flags;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5283,10 +5596,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
buf2_len = 0;
entry = next_entry;
@@ -5317,7 +5630,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5335,10 +5648,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5360,8 +5669,10 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5413,22 +5724,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
@@ -5436,9 +5751,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->page = NULL;
}
@@ -5448,9 +5760,6 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- skb_mark_for_recycle(skb);
buf->sec_page = NULL;
}
@@ -5463,10 +5772,18 @@ drain_data:
/* Got entire packet into SKB. Finish it. */
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
+ (status & csum_none))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5494,11 +5811,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_packets += rx_packets;
- rx_q->rxq_stats.rx_bytes += rx_bytes;
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5511,15 +5828,14 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_rx_queue *rx_q;
+ struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5538,17 +5854,17 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_txq_stats *txq_stats;
+ bool pending_packets = false;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
- work_done = stmmac_tx_clean(priv, budget, chan);
+ work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5559,6 +5875,10 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&ch->lock, flags);
}
+ /* TX still has packets to handle; check if we need to arm the tx timer */
+ if (pending_packets)
+ stmmac_tx_timer_arm(priv, chan);
+
return work_done;
}
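stmmac_tx_clean() grows an out-parameter here so the pollers can re-arm the coalescing timer when descriptors were left unreclaimed by the budget. A simplified sketch of the callee side, assuming the dirty/cur ring indices used elsewhere in this file:

static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
			   bool *pending_packets)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int count = 0;

	/* ... reclaim up to budget completed descriptors ... */

	/* Tell the poller whether tx work remains beyond the budget */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		*pending_packets = true;

	return count;
}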
@@ -5567,23 +5887,23 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ bool tx_pending_packets = false;
int rx_done, tx_done, rxtx_done;
- struct stmmac_rx_queue *rx_q;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_rxq_stats *rxq_stats;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
- unsigned long flags;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
- tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
rx_done = stmmac_rx_zc(priv, budget, chan);
@@ -5608,6 +5928,10 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&ch->lock, flags);
}
+ /* TX still has packets to handle; check if we need to arm the tx timer */
+ if (tx_pending_packets)
+ stmmac_tx_timer_arm(priv, chan);
+
return min(rxtx_done, budget - 1);
}
@@ -5635,6 +5959,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5688,7 +6015,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return PTR_ERR(dma_conf);
}
- stmmac_release(dev);
+ __stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
if (ret) {
@@ -5703,7 +6030,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
stmmac_set_rx_mode(dev);
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
netdev_update_features(dev);
return 0;
@@ -5754,57 +6081,24 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- if (priv->sph_cap) {
- bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_capable) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
u32 chan;
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
}
- return 0;
-}
-
-static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
-{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
-
- if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
- return;
-
- /* If LP has sent verify mPacket, LP is FPE capable */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
- if (*lp_state < FPE_STATE_CAPABLE)
- *lp_state = FPE_STATE_CAPABLE;
-
- /* If user has requested FPE enable, quickly response */
- if (*hs_enable)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- MPACKET_RESPONSE);
- }
-
- /* If Local has sent verify mPacket, Local is FPE capable */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
- if (*lo_state < FPE_STATE_CAPABLE)
- *lo_state = FPE_STATE_CAPABLE;
- }
-
- /* If LP has sent response mPacket, LP is entering FPE ON */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
- *lp_state = FPE_STATE_ENTERING_ON;
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ priv->hw->hw_vlan_en = true;
+ else
+ priv->hw->hw_vlan_en = false;
- /* If Local has sent response mPacket, Local is entering FPE ON */
- if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
- *lo_state = FPE_STATE_ENTERING_ON;
+ phylink_rx_clk_stop_block(priv->phylink);
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
- if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
- !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
- priv->fpe_wq) {
- queue_work(priv->fpe_wq, &priv->fpe_task);
- }
+ return 0;
}
static void stmmac_common_interrupt(struct stmmac_priv *priv)
@@ -5815,25 +6109,21 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
u32 queue;
bool xmac;
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ xmac = dwmac_is_xmac(priv->plat->core_type);
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
pm_wakeup_event(priv->device, 0);
if (priv->dma_cap.estsel)
- stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ stmmac_est_irq_status(priv, priv, priv->dev,
&priv->xstats, tx_cnt);
- if (priv->dma_cap.fpesel) {
- int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
- priv->dev);
-
- stmmac_fpe_event_status(priv, status);
- }
+ if (stmmac_fpe_supported(priv))
+ stmmac_fpe_irq_status(priv);
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || xmac) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
if (unlikely(status)) {
@@ -5844,19 +6134,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
priv->tx_path_in_lpi_mode = false;
}
- for (queue = 0; queue < queues_count; queue++) {
- status = stmmac_host_mtl_irq_status(priv, priv->hw,
- queue);
- }
-
- /* PCS link status */
- if (priv->hw->pcs &&
- !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
- if (priv->xstats.pcs_link)
- netif_carrier_on(priv->dev);
- else
- netif_carrier_off(priv->dev);
- }
+ for (queue = 0; queue < queues_count; queue++)
+ stmmac_host_mtl_irq_status(priv, priv->hw, queue);
stmmac_timestamp_interrupt(priv, priv);
}
@@ -5882,8 +6161,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
- /* Check if a fatal error happened */
- if (stmmac_safety_feat_interrupt(priv))
+ /* Check ASP error if it isn't delivered via an individual IRQ */
+ if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
/* To handle Common interrupts */
@@ -5900,11 +6179,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -5920,11 +6194,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -5946,11 +6215,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -5977,11 +6241,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -5991,33 +6250,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling receive - used by NETCONSOLE and other diagnostic tools
- * to allow network I/O with interrupts disabled.
- */
-static void stmmac_poll_controller(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int i;
-
- /* If adapter is down, do nothing */
- if (test_bit(STMMAC_DOWN, &priv->state))
- return;
-
- if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
- for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
-
- for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
- } else {
- disable_irq(dev->irq);
- stmmac_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
- }
-}
-#endif
-
/**
* stmmac_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
@@ -6041,12 +6273,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG:
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
- case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_set(dev, rq);
- break;
- case SIOCGHWTSTAMP:
- ret = stmmac_hwtstamp_get(dev, rq);
- break;
default:
break;
}
@@ -6090,6 +6316,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_QUERY_CAPS:
return stmmac_tc_query_caps(priv, priv, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return stmmac_tc_setup_mqprio(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -6137,7 +6365,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6151,30 +6381,23 @@ static struct dentry *stmmac_fs_dir;
static void sysfs_display_ring(void *head, int size, int extend_desc,
struct seq_file *seq, dma_addr_t dma_phy_addr)
{
- int i;
struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
struct dma_desc *p = (struct dma_desc *)head;
+ unsigned int desc_size;
dma_addr_t dma_addr;
+ int i;
+ desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
for (i = 0; i < size; i++) {
- if (extend_desc) {
- dma_addr = dma_phy_addr + i * sizeof(*ep);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(ep->basic.des0),
- le32_to_cpu(ep->basic.des1),
- le32_to_cpu(ep->basic.des2),
- le32_to_cpu(ep->basic.des3));
- ep++;
- } else {
- dma_addr = dma_phy_addr + i * sizeof(*p);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(p->des0), le32_to_cpu(p->des1),
- le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ dma_addr = dma_phy_addr + i * desc_size;
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ if (extend_desc)
+ p = &(++ep)->basic;
+ else
p++;
- }
- seq_printf(seq, "\n");
}
}
@@ -6261,7 +6484,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq,
"\tNumber of Additional MAC address registers: %d\n",
priv->dma_cap.multi_addr);
@@ -6285,7 +6508,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
seq_printf(seq, "\tTimestamp System Time Source: %s\n",
dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
@@ -6294,7 +6517,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
- priv->plat->has_xgmac) {
+ priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@ -6412,11 +6635,7 @@ static int stmmac_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_CHANGENAME:
- if (priv->dbgfs_dir)
- priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
- priv->dbgfs_dir,
- stmmac_fs_dir,
- dev->name);
+ debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
break;
}
done:
@@ -6482,7 +6701,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- __le16 pmatch = 0;
+ u16 pmatch = 0;
int count = 0;
u16 vid = 0;
@@ -6497,13 +6716,16 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
if (count > 2) /* VID = 0 always passes filter */
return -EOPNOTSUPP;
- pmatch = cpu_to_le16(vid);
+ pmatch = vid;
hash = 0;
}
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6535,6 +6757,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6806,7 +7031,7 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* Adjust Split header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
@@ -6846,8 +7071,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -6872,7 +7096,6 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
@@ -6924,30 +7147,33 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
int q;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
u64 tx_packets;
u64 tx_bytes;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- tx_packets = txq_stats->tx_packets;
- tx_bytes = txq_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
}
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
u64 rx_packets;
u64 rx_bytes;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- rx_packets = rxq_stats->rx_packets;
- rx_bytes = rxq_stats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -6978,15 +7204,14 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = stmmac_poll_controller,
-#endif
.ndo_set_mac_address = stmmac_set_mac_address,
.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
+ .ndo_hwtstamp_get = stmmac_hwtstamp_get,
+ .ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
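The SIOC[GS]HWTSTAMP cases dropped from stmmac_ioctl() above are now reached through these dedicated callbacks: the core translates both the ioctl and the netlink interface into a kernel_hwtstamp_config before calling the driver. For reference, the expected callback signatures:

	int (*ndo_hwtstamp_get)(struct net_device *dev,
				struct kernel_hwtstamp_config *kernel_config);
	int (*ndo_hwtstamp_set)(struct net_device *dev,
				struct kernel_hwtstamp_config *kernel_config,
				struct netlink_ext_ack *extack);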
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -7055,7 +7280,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
- priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
(BIT(priv->dma_cap.hash_tb_sz) << 5);
@@ -7093,11 +7317,42 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->plat->pmt) {
dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
+ devm_pm_set_wake_irq(priv->device, priv->wol_irq);
}
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ if (priv->dma_cap.number_rx_queues &&
+ priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
+ dev_warn(priv->device,
+ "Number of Rx queues (%u) exceeds dma capability\n",
+ priv->plat->rx_queues_to_use);
+ priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
+ }
+ if (priv->dma_cap.number_tx_queues &&
+ priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
+ dev_warn(priv->device,
+ "Number of Tx queues (%u) exceeds dma capability\n",
+ priv->plat->tx_queues_to_use);
+ priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
+ }
+
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ dev_warn(priv->device,
+ "Rx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->rx_fifo_size);
+ priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
+ }
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ dev_warn(priv->device,
+ "Tx FIFO size (%u) exceeds dma capability\n",
+ priv->plat->tx_fifo_size);
+ priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
+ }
+
priv->hw->vlan_fail_q_en =
(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
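The four warn-and-clamp blocks added above share one shape; a hypothetical helper (not part of this patch) makes the pattern explicit:

    #include <linux/device.h>

    static void demo_clamp_to_cap(struct device *dev, const char *what,
                                  u32 *plat_val, u32 cap)
    {
            /* A zero capability field means "not reported"; skip it. */
            if (cap && *plat_val > cap) {
                    dev_warn(dev, "%s (%u) exceeds dma capability\n",
                             what, *plat_val);
                    *plat_val = cap;
            }
    }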
@@ -7114,13 +7369,21 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* has to be disable and this can be done by passing the
* riwt_off field from the platform.
*/
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
+ priv->plat->core_type == DWMAC_CORE_XGMAC) &&
+ !priv->plat->riwt_off) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
+ /* Unimplemented PCS init (as indicated by stmmac_do_callback()
+ * perversely returning -EINVAL) is non-fatal.
+ */
+ ret = stmmac_mac_pcs_init(priv);
+ if (ret != -EINVAL)
+ return ret;
+
return 0;
}
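The -EINVAL filtering above relies on the driver's callback-dispatch convention: a missing operation reports -EINVAL, which the caller treats as "not implemented" rather than as a failure. Sketched with illustrative names:

    #include <linux/errno.h>

    struct demo_hw;

    struct demo_ops {
            int (*pcs_init)(struct demo_hw *hw);
    };

    struct demo_hw {
            const struct demo_ops *ops;
    };

    static int demo_pcs_init(struct demo_hw *hw)
    {
            if (!hw->ops->pcs_init)
                    return -EINVAL;         /* op not provided */
            return hw->ops->pcs_init(hw);
    }

    static int demo_hw_init(struct demo_hw *hw)
    {
            int ret = demo_pcs_init(hw);

            if (ret != -EINVAL)
                    return ret;     /* 0 on success, or a real error */

            return 0;               /* missing PCS init is non-fatal */
    }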
@@ -7216,68 +7479,6 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
-{
- struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
- fpe_task);
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
- bool *enable = &fpe_cfg->enable;
- int retries = 20;
-
- while (retries-- > 0) {
- /* Bail out immediately if FPE handshake is OFF */
- if (*lo_state == FPE_STATE_OFF || !*hs_enable)
- break;
-
- if (*lo_state == FPE_STATE_ENTERING_ON &&
- *lp_state == FPE_STATE_ENTERING_ON) {
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- *enable);
-
- netdev_info(priv->dev, "configured FPE\n");
-
- *lo_state = FPE_STATE_ON;
- *lp_state = FPE_STATE_ON;
- netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
- break;
- }
-
- if ((*lo_state == FPE_STATE_CAPABLE ||
- *lo_state == FPE_STATE_ENTERING_ON) &&
- *lp_state != FPE_STATE_ON) {
- netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
- *lo_state, *lp_state);
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- MPACKET_VERIFY);
- }
- /* Sleep then retry */
- msleep(500);
- }
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
-}
-
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
-{
- if (priv->plat->fpe_cfg->hs_enable != enable) {
- if (enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- MPACKET_VERIFY);
- } else {
- priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
- priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
- }
-
- priv->plat->fpe_cfg->hs_enable = enable;
- }
-}
-
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct stmmac_xdp_buff *ctx = (void *)_ctx;
@@ -7291,7 +7492,7 @@ static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
return -ENODATA;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc_contains_ts = ndesc;
/* Check if timestamp is available */
@@ -7309,19 +7510,133 @@ static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
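On the consumer side, an XDP program reaches stmmac_xdp_rx_timestamp() through the bpf_xdp_metadata_rx_timestamp() kfunc. A minimal BPF-side sketch (built with libbpf; program name illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                             __u64 *timestamp) __ksym;

    SEC("xdp")
    int demo_rx_ts(struct xdp_md *ctx)
    {
            __u64 ts;

            /* Returns 0 and fills @ts when the driver has a timestamp. */
            if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
                    bpf_printk("rx hw ts: %llu", ts);

            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";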
-/**
- * stmmac_dvr_probe
- * @device: device pointer
- * @plat_dat: platform data pointer
- * @res: stmmac resource pointer
- * Description: this is the main probe function used to
- * call the alloc_etherdev, allocate the priv structure.
- * Return:
- * returns 0 on success, otherwise errno.
- */
-int stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *res)
+static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ priv->tsfupdt_coarse = ctx->val.vbool;
+
+ if (priv->tsfupdt_coarse)
+ priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
+ else
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
+
+ /* In Coarse mode we can use a smaller subsecond increment;
+ * reconfigure the systime, subsecond increment and addend
+ * accordingly.
+ */
+ stmmac_update_subsecond_increment(priv);
+
+ return 0;
+}
+
+static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ ctx->val.vbool = priv->tsfupdt_coarse;
+
+ return 0;
+}
+
+static const struct devlink_param stmmac_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ stmmac_dl_ts_coarse_get,
+ stmmac_dl_ts_coarse_set, NULL),
+};
+
+/* None of the generic devlink parameters are implemented */
+static const struct devlink_ops stmmac_devlink_ops = {};
+
+static int stmmac_register_devlink(struct stmmac_priv *priv)
+{
+ struct stmmac_devlink_priv *dl_priv;
+ int ret;
+
+ /* For now, what is exposed over devlink is only relevant when
+ * timestamping is available and we have a valid ptp clock rate
+ */
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
+ !priv->plat->clk_ptp_rate)
+ return 0;
+
+ priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
+ priv->device);
+ if (!priv->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->stmmac_priv = priv;
+
+ ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ if (ret)
+ goto dl_free;
+
+ devlink_register(priv->devlink);
+ return 0;
+
+dl_free:
+ devlink_free(priv->devlink);
+
+ return ret;
+}
+
+static void stmmac_unregister_devlink(struct stmmac_priv *priv)
+{
+ if (!priv->devlink)
+ return;
+
+ devlink_unregister(priv->devlink);
+ devlink_params_unregister(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ devlink_free(priv->devlink);
+}
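Once registered, the runtime parameter can be flipped from userspace with the standard devlink tool; the device handle below is illustrative:

    devlink dev param set pci/0000:01:00.0 name phc_coarse_adj \
            value true cmode runtime
    devlink dev param show pci/0000:01:00.0 name phc_coarse_adj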
+
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ int i;
+
+ plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
+ if (!plat_dat)
+ return NULL;
+
+ /* Set the defaults:
+ * - phy autodetection
+ * - determine GMII_Address CR field from CSR clock
+ * - allow MTU up to JUMBO_LEN
+ * - hash table size
+ * - one unicast filter entry
+ */
+ plat_dat->phy_addr = -1;
+ plat_dat->clk_csr = -1;
+ plat_dat->maxmtu = JUMBO_LEN;
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat_dat->unicast_filter_entries = 1;
+
+ /* Set the mtl defaults */
+ plat_dat->tx_queues_to_use = 1;
+ plat_dat->rx_queues_to_use = 1;
+
+ /* Setup the default RX queue channel map */
+ for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
+ plat_dat->rx_queues_cfg[i].chan = i;
+
+ return plat_dat;
+}
+EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
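A glue driver would consume the allocator roughly as follows; this is a sketch with illustrative resource wiring, overriding only board-specific fields and keeping the defaults set above:

    #include <linux/platform_device.h>
    #include "stmmac.h"

    static int demo_glue_probe(struct platform_device *pdev)
    {
            struct plat_stmmacenet_data *plat;
            struct stmmac_resources res = {};

            plat = stmmac_plat_dat_alloc(&pdev->dev);
            if (!plat)
                    return -ENOMEM;

            /* Board-specific overrides; everything else keeps defaults. */
            plat->tx_queues_to_use = 4;
            plat->rx_queues_to_use = 4;

            res.addr = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(res.addr))
                    return PTR_ERR(res.addr);

            res.irq = platform_get_irq(pdev, 0);
            if (res.irq < 0)
                    return res.irq;

            return stmmac_dvr_probe(&pdev->dev, plat, &res);
    }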
+
+static int __stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
@@ -7340,12 +7655,19 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ }
+
+ priv->xstats.pcpu_stats =
+ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ if (!priv->xstats.pcpu_stats)
+ return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7355,6 +7677,7 @@ int stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
+ priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
@@ -7384,8 +7707,7 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
- /* Initialize Link Partner FPE workqueue */
- INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
@@ -7408,6 +7730,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
+ /* Wait a bit for the reset to take effect */
+ udelay(10);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
@@ -7423,6 +7748,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+ ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
@@ -7436,7 +7762,7 @@ int stmmac_dvr_probe(struct device *device,
if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (priv->plat->has_gmac4)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
@@ -7445,8 +7771,8 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.sphen &&
!(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
- priv->sph_cap = true;
- priv->sph = priv->sph_cap;
+ priv->sph_capable = true;
+ priv->sph_active = priv->sph_capable;
dev_info(priv->device, "SPH feature enabled\n");
}
@@ -7489,15 +7815,16 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (dwmac_is_xmac(priv->plat->core_type)) {
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+ }
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -7513,30 +7840,26 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_RXHASH;
ndev->vlan_features |= ndev->features;
- /* TSO doesn't work on VLANs yet */
- ndev->vlan_features &= ~NETIF_F_TSO;
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
- if (priv->plat->has_xgmac)
+
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
ndev->max_mtu = XGMAC_JUMBO_LEN;
- else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
- /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
- * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
+
+ /* Warn if the platform's maxmtu is smaller than the minimum MTU,
+ * otherwise clamp the maximum MTU above to the platform's maxmtu.
*/
- if ((priv->plat->maxmtu < ndev->max_mtu) &&
- (priv->plat->maxmtu >= ndev->min_mtu))
- ndev->max_mtu = priv->plat->maxmtu;
- else if (priv->plat->maxmtu < ndev->min_mtu)
+ if (priv->plat->maxmtu < ndev->min_mtu)
dev_warn(priv->device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
-
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ else if (priv->plat->maxmtu < ndev->max_mtu)
+ ndev->max_mtu = priv->plat->maxmtu;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -7545,16 +7868,7 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
- /* If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed. Viceversa the driver'll try to
- * set the MDC clock dynamically according to the csr actual
- * clock input.
- */
- if (priv->plat->clk_csr >= 0)
- priv->clk_csr = priv->plat->clk_csr;
- else
- stmmac_clk_csr_set(priv);
+ stmmac_fpe_init(priv);
stmmac_check_pcs_mode(priv);
@@ -7563,33 +7877,28 @@ int stmmac_dvr_probe(struct device *device,
if (!pm_runtime_enabled(device))
pm_runtime_enable(device);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
- /* MDIO bus Registration */
- ret = stmmac_mdio_register(ndev);
- if (ret < 0) {
- dev_err_probe(priv->device, ret,
- "%s: MDIO bus (id: %d) registration failed\n",
- __func__, priv->plat->bus_id);
- goto error_mdio_register;
- }
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err_probe(priv->device, ret,
+ "MDIO bus (id: %d) registration failed\n",
+ priv->plat->bus_id);
+ goto error_mdio_register;
}
- if (priv->plat->speed_mode_2500)
- priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
-
- if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
- ret = stmmac_xpcs_setup(priv->mii);
- if (ret)
- goto error_xpcs_setup;
- }
+ ret = stmmac_pcs_setup(ndev);
+ if (ret)
+ goto error_pcs_setup;
- ret = stmmac_phy_setup(priv);
+ ret = stmmac_phylink_setup(priv);
if (ret) {
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
goto error_phy_setup;
}
+ ret = stmmac_register_devlink(priv);
+ if (ret)
+ goto error_devlink_setup;
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@ -7612,12 +7921,13 @@ int stmmac_dvr_probe(struct device *device,
return ret;
error_netdev_register:
+ stmmac_unregister_devlink(priv);
+error_devlink_setup:
phylink_destroy(priv->phylink);
-error_xpcs_setup:
error_phy_setup:
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+ stmmac_pcs_clean(ndev);
+error_pcs_setup:
+ stmmac_mdio_unregister(ndev);
error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
@@ -7627,6 +7937,34 @@ error_wq_init:
return ret;
}
+
+/**
+ * stmmac_dvr_probe
+ * @dev: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * returns 0 on success, otherwise errno.
+ */
+int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ if (plat_dat->init) {
+ ret = plat_dat->init(dev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = __stmmac_dvr_probe(dev, plat_dat, res);
+ if (ret && plat_dat->exit)
+ plat_dat->exit(dev, plat_dat->bsp_priv);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
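With the wrapper above, BSP hooks run symmetrically: init() before the core probe, exit() if the core probe fails (and again from stmmac_dvr_remove(), as seen below). A sketch of matching glue-side callbacks, with the clock handling purely illustrative:

    #include <linux/clk.h>

    struct demo_glue {
            struct clk *tx_clk;
    };

    static int demo_glue_init(struct device *dev, void *bsp_priv)
    {
            struct demo_glue *glue = bsp_priv;

            return clk_prepare_enable(glue->tx_clk);
    }

    static void demo_glue_exit(struct device *dev, void *bsp_priv)
    {
            struct demo_glue *glue = bsp_priv;

            clk_disable_unprepare(glue->tx_clk);
    }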
/**
@@ -7644,27 +7982,30 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
- netif_carrier_off(ndev);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs(ndev);
#endif
+ stmmac_unregister_devlink(priv);
+
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+
+ stmmac_pcs_clean(ndev);
+ stmmac_mdio_unregister(ndev);
+
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
@@ -7693,9 +8034,9 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
+ timer_delete_sync(&priv->eee_ctrl_timer);
}
/* Stop TX/RX DMA */
@@ -7705,7 +8046,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (priv->wolopts) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
@@ -7716,26 +8057,15 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ phylink_suspend(priv->phylink, !!priv->wolopts);
rtnl_unlock();
- if (priv->dma_cap.fpesel) {
- /* Disable FPE */
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, false);
+ if (stmmac_fpe_supported(priv))
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
- stmmac_fpe_handshake(priv, false);
- stmmac_fpe_stop_wq(priv);
- }
+ if (priv->plat->suspend)
+ return priv->plat->suspend(dev, priv->plat->bsp_priv);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7788,6 +8118,12 @@ int stmmac_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
+ if (priv->plat->resume) {
+ ret = priv->plat->resume(dev, priv->plat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
if (!netif_running(ndev))
return 0;
@@ -7797,7 +8133,7 @@ int stmmac_resume(struct device *dev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (priv->wolopts) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@ -7819,16 +8155,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that the clocks it provides,
+ * which the MAC needs to complete its DMA reset, are running.
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7836,16 +8168,33 @@ int stmmac_resume(struct device *dev)
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv, &priv->dma_conf);
- stmmac_hw_setup(ndev, false);
+ ret = stmmac_hw_setup(ndev);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+ return ret;
+ }
+
+ stmmac_init_timestamping(priv);
+
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
rtnl_unlock();
netif_device_attach(ndev);
@@ -7854,6 +8203,10 @@ int stmmac_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_resume);
+/* Unlike EXPORT_GPL_SIMPLE_DEV_PM_OPS(), this keeps the ops defined and
+ * exported even when CONFIG_PM=n.
+ */
+DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
+EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
+
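Because DEFINE_SIMPLE_DEV_PM_OPS() emits the ops structure even when CONFIG_PM=n, a glue driver can reference the exported symbol unconditionally; driver name and match table below are illustrative:

    #include <linux/mod_devicetable.h>
    #include <linux/platform_device.h>

    static const struct of_device_id demo_dwmac_match[] = {
            { .compatible = "demo,dwmac" },
            { }
    };
    MODULE_DEVICE_TABLE(of, demo_dwmac_match);

    static struct platform_driver demo_dwmac_driver = {
            .probe = demo_glue_probe,       /* from the earlier sketch */
            .driver = {
                    .name = "demo-dwmac",
                    .of_match_table = demo_dwmac_match,
                    .pm = &stmmac_simple_pm_ops,
            },
    };
    module_platform_driver(demo_dwmac_driver);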
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -7868,9 +8221,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;