Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c')
 -rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 946
 1 file changed, 594 insertions(+), 352 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..b155e71aac51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -1,19 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /******************************************************************************* STMMAC Ethtool support Copyright (C) 2007-2009 STMicroelectronics Ltd - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ @@ -21,28 +11,39 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/mii.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/net_tstamp.h> -#include <asm/io.h> #include "stmmac.h" +#include "stmmac_fpe.h" #include "dwmac_dma.h" +#include "dwxgmac2.h" #define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" +#define XGMAC_ETHTOOL_NAME "st_xgmac" + +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the + * same time due to the conflicting macro names. 
+ */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 #define ETHTOOL_DMA_OFFSET 55 struct stmmac_stats { - char stat_string[ETH_GSTRING_LEN]; + char stat_string[ETH_GSTRING_LEN] __nonstring; int sizeof_stat; int stat_offset; }; #define STMMAC_STAT(m) \ - { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ + { #m, sizeof_field(struct stmmac_extra_stats, m), \ offsetof(struct stmmac_priv, xstats.m)} static const struct stmmac_stats stmmac_gstrings_stats[] = { @@ -75,6 +76,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(rx_missed_cntr), STMMAC_STAT(rx_overflow_cntr), STMMAC_STAT(rx_vlan), + STMMAC_STAT(rx_split_hdr_pkt_n), /* Tx/Rx IRQ error info */ STMMAC_STAT(tx_undeflow_irq), STMMAC_STAT(tx_process_stopped_irq), @@ -88,14 +90,6 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { /* Tx/Rx IRQ Events */ STMMAC_STAT(rx_early_irq), STMMAC_STAT(threshold), - STMMAC_STAT(tx_pkt_n), - STMMAC_STAT(rx_pkt_n), - STMMAC_STAT(normal_irq_n), - STMMAC_STAT(rx_normal_irq_n), - STMMAC_STAT(napi_poll), - STMMAC_STAT(tx_normal_irq_n), - STMMAC_STAT(tx_clean), - STMMAC_STAT(tx_set_ic_bit), STMMAC_STAT(irq_receive_pmt_irq_n), /* MMC info */ STMMAC_STAT(mmc_tx_irq_n), @@ -162,15 +156,35 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(mtl_rx_fifo_ctrl_active), STMMAC_STAT(mac_rx_frame_ctrl_fifo), STMMAC_STAT(mac_gmii_rx_proto_engine), - /* TSO */ - STMMAC_STAT(tx_tso_frames), - STMMAC_STAT(tx_tso_nfrags), + /* EST */ + STMMAC_STAT(mtl_est_cgce), + STMMAC_STAT(mtl_est_hlbs), + STMMAC_STAT(mtl_est_hlbf), + STMMAC_STAT(mtl_est_btre), + STMMAC_STAT(mtl_est_btrlm), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) +/* statistics collected in queue which will be summed up for all TX or RX + * queues, or summed up for both TX and RX queues(napi_poll, normal_irq_n). 
+ */ +static const char stmmac_qstats_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_normal_irq_n", + "tx_pkt_n", + "tx_normal_irq_n", + "tx_clean", + "tx_set_ic_bit", + "tx_tso_frames", + "tx_tso_nfrags", + "normal_irq_n", + "napi_poll", +}; +#define STMMAC_QSTATS ARRAY_SIZE(stmmac_qstats_string) + /* HW MAC Management counters (if supported) */ #define STMMAC_MMC_STAT(m) \ - { #m, FIELD_SIZEOF(struct stmmac_counters, m), \ + { #m, sizeof_field(struct stmmac_counters, m), \ offsetof(struct stmmac_priv, mmc.m)} static const struct stmmac_stats stmmac_mmc[] = { @@ -199,6 +213,9 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_tx_excessdef), STMMAC_MMC_STAT(mmc_tx_pause_frame), STMMAC_MMC_STAT(mmc_tx_vlan_frame_g), + STMMAC_MMC_STAT(mmc_tx_oversize_g), + STMMAC_MMC_STAT(mmc_tx_lpi_usec), + STMMAC_MMC_STAT(mmc_tx_lpi_tran), STMMAC_MMC_STAT(mmc_rx_framecount_gb), STMMAC_MMC_STAT(mmc_rx_octetcount_gb), STMMAC_MMC_STAT(mmc_rx_octetcount_g), @@ -223,8 +240,12 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_rx_fifo_overflow), STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb), STMMAC_MMC_STAT(mmc_rx_watchdog_error), - STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask), - STMMAC_MMC_STAT(mmc_rx_ipc_intr), + STMMAC_MMC_STAT(mmc_rx_error), + STMMAC_MMC_STAT(mmc_rx_lpi_usec), + STMMAC_MMC_STAT(mmc_rx_lpi_tran), + STMMAC_MMC_STAT(mmc_rx_discard_frames_gb), + STMMAC_MMC_STAT(mmc_rx_discard_octets_gb), + STMMAC_MMC_STAT(mmc_rx_align_err_frames), STMMAC_MMC_STAT(mmc_rx_ipv4_gd), STMMAC_MMC_STAT(mmc_rx_ipv4_hderr), STMMAC_MMC_STAT(mmc_rx_ipv4_nopay), @@ -253,118 +274,56 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_rx_tcp_err_octets), STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets), STMMAC_MMC_STAT(mmc_rx_icmp_err_octets), + STMMAC_MMC_STAT(mmc_sgf_pass_fragment_cntr), + STMMAC_MMC_STAT(mmc_sgf_fail_fragment_cntr), + STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr), + STMMAC_MMC_STAT(mmc_tx_hold_req_cntr), + STMMAC_MMC_STAT(mmc_tx_gate_overrun_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr), + STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr), + STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr), }; #define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) +static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = { + "tx_pkt_n", + "tx_irq_n", +#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string) +}; + +static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_irq_n", +#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string) +}; + static void stmmac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct stmmac_priv *priv = netdev_priv(dev); - if (priv->plat->has_gmac || priv->plat->has_gmac4) - strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + if (priv->plat->core_type == DWMAC_CORE_GMAC || + priv->plat->core_type == DWMAC_CORE_GMAC4) + strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + else if (priv->plat->core_type == DWMAC_CORE_XGMAC) + strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver)); else - strlcpy(info->driver, MAC100_ETHTOOL_NAME, + strscpy(info->driver, MAC100_ETHTOOL_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + if (priv->plat->pdev) { + strscpy(info->bus_info, pci_name(priv->plat->pdev), + sizeof(info->bus_info)); + } } static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct stmmac_priv *priv = 
netdev_priv(dev); - struct phy_device *phy = dev->phydev; - - if (priv->hw->pcs & STMMAC_PCS_RGMII || - priv->hw->pcs & STMMAC_PCS_SGMII) { - struct rgmii_adv adv; - u32 supported, advertising, lp_advertising; - - if (!priv->xstats.pcs_link) { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - return 0; - } - cmd->base.duplex = priv->xstats.pcs_duplex; - - cmd->base.speed = priv->xstats.pcs_speed; - - /* Get and convert ADV/LP_ADV from the HW AN registers */ - if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) - return -EOPNOTSUPP; /* should never happen indeed */ - - /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ - - ethtool_convert_link_mode_to_legacy_u32( - &supported, cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32( - &advertising, cmd->link_modes.advertising); - ethtool_convert_link_mode_to_legacy_u32( - &lp_advertising, cmd->link_modes.lp_advertising); - - if (adv.pause & STMMAC_PCS_PAUSE) - advertising |= ADVERTISED_Pause; - if (adv.pause & STMMAC_PCS_ASYM_PAUSE) - advertising |= ADVERTISED_Asym_Pause; - if (adv.lp_pause & STMMAC_PCS_PAUSE) - lp_advertising |= ADVERTISED_Pause; - if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) - lp_advertising |= ADVERTISED_Asym_Pause; - - /* Reg49[3] always set because ANE is always supported */ - cmd->base.autoneg = ADVERTISED_Autoneg; - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; - lp_advertising |= ADVERTISED_Autoneg; - - if (adv.duplex) { - supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full | - SUPPORTED_10baseT_Full); - advertising |= (ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full); - } else { - supported |= (SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); - advertising |= (ADVERTISED_1000baseT_Half | - ADVERTISED_100baseT_Half | - ADVERTISED_10baseT_Half); - } - if (adv.lp_duplex) - lp_advertising |= (ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full); - else - lp_advertising |= (ADVERTISED_1000baseT_Half | - ADVERTISED_100baseT_Half | - ADVERTISED_10baseT_Half); - cmd->base.port = PORT_OTHER; - - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.advertising, advertising); - ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.lp_advertising, lp_advertising); - - return 0; - } - if (phy == NULL) { - pr_err("%s: %s: PHY is not registered\n", - __func__, dev->name); - return -ENODEV; - } - if (!netif_running(dev)) { - pr_err("%s: interface is disabled: we cannot track " - "link speed / duplex setting\n", dev->name); - return -EBUSY; - } - phy_ethtool_ksettings_get(phy, cmd); - return 0; + return phylink_ethtool_ksettings_get(priv->phylink, cmd); } static int @@ -372,34 +331,8 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct stmmac_priv *priv = netdev_priv(dev); - struct phy_device *phy = dev->phydev; - int rc; - - if (priv->hw->pcs & STMMAC_PCS_RGMII || - priv->hw->pcs & STMMAC_PCS_SGMII) { - u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; - - /* Only support ANE */ - if (cmd->base.autoneg != AUTONEG_ENABLE) - return -EINVAL; - - mask &= (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full); - - mutex_lock(&priv->lock); - stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 
priv->hw->ps, 0); - mutex_unlock(&priv->lock); - - return 0; - } - rc = phy_ethtool_ksettings_set(phy, cmd); - - return rc; + return phylink_ethtool_ksettings_set(priv->phylink, cmd); } static u32 stmmac_ethtool_getmsglevel(struct net_device *dev) @@ -415,63 +348,84 @@ static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) } -static int stmmac_check_if_running(struct net_device *dev) -{ - if (!netif_running(dev)) - return -EBUSY; - return 0; -} - static int stmmac_ethtool_get_regs_len(struct net_device *dev) { + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->plat->core_type == DWMAC_CORE_XGMAC) + return XGMAC_REGSIZE * 4; + else if (priv->plat->core_type == DWMAC_CORE_GMAC4) + return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } static void stmmac_ethtool_gregs(struct net_device *dev, struct ethtool_regs *regs, void *space) { - u32 *reg_space = (u32 *) space; - struct stmmac_priv *priv = netdev_priv(dev); - - memset(reg_space, 0x0, REG_SPACE_SIZE); + u32 *reg_space = (u32 *) space; stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); + /* Copy DMA registers to where ethtool expects them */ - memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], - NUM_DWMAC1000_DMA_REGS * 4); + if (priv->plat->core_type == DWMAC_CORE_GMAC4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (priv->plat->core_type != DWMAC_CORE_XGMAC) { + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[DMA_BUS_MODE / 4], + NUM_DWMAC1000_DMA_REGS * 4); + } } -static void -stmmac_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) +static int stmmac_nway_reset(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return phylink_ethtool_nway_reset(priv->phylink); +} + +static void stmmac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct stmmac_priv *priv = netdev_priv(netdev); - struct rgmii_adv adv_lp; - pause->rx_pause = 0; - pause->tx_pause = 0; + ring->rx_max_pending = DMA_MAX_RX_SIZE; + ring->tx_max_pending = DMA_MAX_TX_SIZE; + ring->rx_pending = priv->dma_conf.dma_rx_size; + ring->tx_pending = priv->dma_conf.dma_tx_size; +} - if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { - pause->autoneg = 1; - if (!adv_lp.pause) - return; - } else { - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, - netdev->phydev->supported) || - linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - netdev->phydev->supported)) - return; - } +static int stmmac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + if (ring->rx_mini_pending || ring->rx_jumbo_pending || + ring->rx_pending < DMA_MIN_RX_SIZE || + ring->rx_pending > DMA_MAX_RX_SIZE || + !is_power_of_2(ring->rx_pending) || + ring->tx_pending < DMA_MIN_TX_SIZE || + ring->tx_pending > DMA_MAX_TX_SIZE || + !is_power_of_2(ring->tx_pending)) + return -EINVAL; - pause->autoneg = netdev->phydev->autoneg; + return stmmac_reinit_ringparam(netdev, ring->rx_pending, + ring->tx_pending); +} - if (priv->flow_ctrl & FLOW_RX) - pause->rx_pause = 1; - if (priv->flow_ctrl & FLOW_TX) - pause->tx_pause = 1; +static void +stmmac_get_pauseparam(struct net_device *netdev, + struct 
ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + phylink_ethtool_get_pauseparam(priv->phylink, pause); } static int @@ -479,39 +433,84 @@ stmmac_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); - u32 tx_cnt = priv->plat->tx_queues_to_use; - struct phy_device *phy = netdev->phydev; - int new_pause = FLOW_OFF; - struct rgmii_adv adv_lp; - - if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { - pause->autoneg = 1; - if (!adv_lp.pause) - return -EOPNOTSUPP; - } else { - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phy->supported) || - linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phy->supported)) - return -EOPNOTSUPP; + + return phylink_ethtool_set_pauseparam(priv->phylink, pause); +} + +static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; } + return total; +} - if (pause->rx_pause) - new_pause |= FLOW_RX; - if (pause->tx_pause) - new_pause |= FLOW_TX; +static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} - priv->flow_ctrl = new_pause; - phy->autoneg = pause->autoneg; +static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + unsigned int start; + int q; + + for (q = 0; q < tx_cnt; q++) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; + u64 pkt_n; - if (phy->autoneg) { - if (netif_running(netdev)) - return phy_start_aneg(phy); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); + + *data++ = pkt_n; + *data++ = stmmac_get_tx_normal_irq_n(priv, q); } - stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl, - priv->pause, tx_cnt); - return 0; + for (q = 0; q < rx_cnt; q++) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; + u64 pkt_n; + + do { + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); + + *data++ = pkt_n; + *data++ = stmmac_get_rx_normal_irq_n(priv, q); + } } static void stmmac_get_ethtool_stats(struct net_device *dev, @@ -520,8 +519,10 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; + u64 napi_poll = 0, normal_irq_n = 0; + int i, j = 0, pos, ret; unsigned long count; - int i, j = 0, ret; + unsigned int start; if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; 
i++) { @@ -532,12 +533,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, } /* Update the DMA HW counters for dwmac10/100 */ - ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, - priv->ioaddr); + ret = stmmac_dma_diagnostic_fr(priv, &priv->xstats, priv->ioaddr); if (ret) { /* If supported, for new GMAC chips expose the MMC counters */ if (priv->dma_cap.rmon) { - dwmac_mmc_read(priv->mmcaddr, &priv->mmc); + stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc); for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { char *p; @@ -548,8 +548,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, (*(u32 *)p); } } - if (priv->eee_enabled) { - int val = phy_get_eee_err(dev->phydev); + if (priv->dma_cap.eee) { + int val = phylink_get_eee_err(priv->phylink); if (val) priv->xstats.phy_eee_wakeup_error_n = val; } @@ -564,16 +564,73 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); } + + pos = j; + for (i = 0; i < rx_queues_count; i++) { + struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; + struct stmmac_napi_rx_stats snapshot; + u64 n_irq; + + j = pos; + do { + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + snapshot = rxq_stats->napi; + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&snapshot.rx_pkt_n); + n_irq = stmmac_get_rx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + napi_poll += u64_stats_read(&snapshot.poll); + } + + pos = j; + for (i = 0; i < tx_queues_count; i++) { + struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; + struct stmmac_napi_tx_stats napi_snapshot; + struct stmmac_q_tx_stats q_snapshot; + u64 n_irq; + + j = pos; + do { + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + q_snapshot = txq_stats->q; + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + napi_snapshot = txq_stats->napi; + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n); + n_irq = stmmac_get_tx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + data[j++] += u64_stats_read(&napi_snapshot.tx_clean); + data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) + + u64_stats_read(&napi_snapshot.tx_set_ic_bit); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags); + napi_poll += u64_stats_read(&napi_snapshot.poll); + } + normal_irq_n += priv->xstats.rx_early_irq; + data[j++] = normal_irq_n; + data[j++] = napi_poll; + + stmmac_get_per_qstats(priv, &data[j]); } static int stmmac_get_sset_count(struct net_device *netdev, int sset) { struct stmmac_priv *priv = netdev_priv(netdev); + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; int i, len, safety_len = 0; switch (sset) { case ETH_SS_STATS: - len = STMMAC_STATS_LEN; + len = STMMAC_STATS_LEN + STMMAC_QSTATS + + STMMAC_TXQ_STATS * tx_cnt + + STMMAC_RXQ_STATS * rx_cnt; if (priv->dma_cap.rmon) len += STMMAC_MMC_STATS_LEN; @@ -589,11 +646,35 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) } return len; + case ETH_SS_TEST: + return stmmac_selftest_get_count(priv); default: return -EOPNOTSUPP; } } +static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = 
priv->plat->rx_queues_to_use; + int q, stat; + + for (q = 0; q < tx_cnt; q++) { + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_tx_string[stat]); + data += ETH_GSTRING_LEN; + } + } + for (q = 0; q < rx_cnt; q++) { + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_rx_string[stat]); + data += ETH_GSTRING_LEN; + } + } +} + static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; @@ -620,10 +701,17 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) p += ETH_GSTRING_LEN; } for (i = 0; i < STMMAC_STATS_LEN; i++) { - memcpy(p, stmmac_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); + memcpy(p, stmmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < STMMAC_QSTATS; i++) { + memcpy(p, stmmac_qstats_string[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + stmmac_get_qstats_string(priv, p); + break; + case ETH_SS_TEST: + stmmac_selftest_get_strings(priv, p); break; default: WARN_ON(1); @@ -636,93 +724,41 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - mutex_lock(&priv->lock); - if (device_can_wakeup(priv->device)) { - wol->supported = WAKE_MAGIC | WAKE_UCAST; - wol->wolopts = priv->wolopts; - } - mutex_unlock(&priv->lock); + return phylink_ethtool_get_wol(priv->phylink, wol); } static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - u32 support = WAKE_MAGIC | WAKE_UCAST; - - /* By default almost all GMAC devices support the WoL via - * magic frame but we can disable it if the HW capability - * register shows no support for pmt_magic_frame. */ - if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame)) - wol->wolopts &= ~WAKE_MAGIC; - if (!device_can_wakeup(priv->device)) - return -EINVAL; - - if (wol->wolopts & ~support) - return -EINVAL; - - if (wol->wolopts) { - pr_info("stmmac: wakeup enable\n"); - device_set_wakeup_enable(priv->device, 1); - enable_irq_wake(priv->wol_irq); - } else { - device_set_wakeup_enable(priv->device, 0); - disable_irq_wake(priv->wol_irq); - } - - mutex_lock(&priv->lock); - priv->wolopts = wol->wolopts; - mutex_unlock(&priv->lock); - - return 0; + return phylink_ethtool_set_wol(priv->phylink, wol); } static int stmmac_ethtool_op_get_eee(struct net_device *dev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct stmmac_priv *priv = netdev_priv(dev); - if (!priv->dma_cap.eee) - return -EOPNOTSUPP; - - edata->eee_enabled = priv->eee_enabled; - edata->eee_active = priv->eee_active; - edata->tx_lpi_timer = priv->tx_lpi_timer; - - return phy_ethtool_get_eee(dev->phydev, edata); + return phylink_ethtool_get_eee(priv->phylink, edata); } static int stmmac_ethtool_op_set_eee(struct net_device *dev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct stmmac_priv *priv = netdev_priv(dev); - priv->eee_enabled = edata->eee_enabled; - - if (!priv->eee_enabled) - stmmac_disable_eee_mode(priv); - else { - /* We are asking for enabling the EEE but it is safe - * to verify all by invoking the eee_init function. - * In case of failure it will return an error. 
- */ - priv->eee_enabled = stmmac_eee_init(priv); - if (!priv->eee_enabled) - return -EOPNOTSUPP; - - /* Do not change tx_lpi_timer in case of failure */ - priv->tx_lpi_timer = edata->tx_lpi_timer; - } - - return phy_ethtool_set_eee(dev->phydev, edata); + return phylink_ethtool_set_eee(priv->phylink, edata); } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (usec * (clk / 1000000)) / 256; } @@ -731,50 +767,112 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (riwt * 256) / (clk / 1000000); } -static int stmmac_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) +static int __stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) { struct stmmac_priv *priv = netdev_priv(dev); + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; - ec->tx_coalesce_usecs = priv->tx_coal_timer; - ec->tx_max_coalesced_frames = priv->tx_coal_frames; + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); - if (priv->use_riwt) - ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv); + if (queue < 0) + queue = 0; + else if (queue >= max_cnt) + return -EINVAL; + + if (queue < tx_cnt) { + ec->tx_coalesce_usecs = priv->tx_coal_timer[queue]; + ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue]; + } else { + ec->tx_coalesce_usecs = 0; + ec->tx_max_coalesced_frames = 0; + } + + if (priv->use_riwt && queue < rx_cnt) { + ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue]; + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue], + priv); + } else { + ec->rx_max_coalesced_frames = 0; + ec->rx_coalesce_usecs = 0; + } return 0; } -static int stmmac_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) +static int stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return __stmmac_get_coalesce(dev, ec, -1); +} + +static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_get_coalesce(dev, ec, queue); +} + +static int __stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + int queue) { struct stmmac_priv *priv = netdev_priv(dev); - u32 rx_cnt = priv->plat->rx_queues_to_use; + bool all_queues = false; unsigned int rx_riwt; + u32 max_cnt; + u32 rx_cnt; + u32 tx_cnt; - /* Check not supported parameters */ - if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) || - (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) || - (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) || - (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) || - (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) || - (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) || - (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) || - (ec->rx_max_coalesced_frames_high) || - (ec->tx_max_coalesced_frames_irq) || - (ec->stats_block_coalesce_usecs) || - (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval)) - return -EOPNOTSUPP; + rx_cnt = priv->plat->rx_queues_to_use; + tx_cnt = 
priv->plat->tx_queues_to_use; + max_cnt = max(rx_cnt, tx_cnt); - if (ec->rx_coalesce_usecs == 0) + if (queue < 0) + all_queues = true; + else if (queue >= max_cnt) return -EINVAL; + if (priv->use_riwt) { + rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) + return -EINVAL; + + if (all_queues) { + int i; + + for (i = 0; i < rx_cnt; i++) { + priv->rx_riwt[i] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, i); + priv->rx_coal_frames[i] = + ec->rx_max_coalesced_frames; + } + } else if (queue < rx_cnt) { + priv->rx_riwt[queue] = rx_riwt; + stmmac_rx_watchdog(priv, priv->ioaddr, + rx_riwt, queue); + priv->rx_coal_frames[queue] = + ec->rx_max_coalesced_frames; + } + } + if ((ec->tx_coalesce_usecs == 0) && (ec->tx_max_coalesced_frames == 0)) return -EINVAL; @@ -783,24 +881,136 @@ static int stmmac_set_coalesce(struct net_device *dev, (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) return -EINVAL; - rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); + if (all_queues) { + int i; - if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) - return -EINVAL; - else if (!priv->use_riwt) + for (i = 0; i < tx_cnt; i++) { + priv->tx_coal_frames[i] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[i] = + ec->tx_coalesce_usecs; + } + } else if (queue < tx_cnt) { + priv->tx_coal_frames[queue] = + ec->tx_max_coalesced_frames; + priv->tx_coal_timer[queue] = + ec->tx_coalesce_usecs; + } + + return 0; +} + +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + return __stmmac_set_coalesce(dev, ec, -1); +} + +static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __stmmac_set_coalesce(dev, ec, queue); +} + +static int stmmac_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->plat->rx_queues_to_use; + break; + default: return -EOPNOTSUPP; + } - /* Only copy relevant parameters, ignore all others. 
*/ - priv->tx_coal_frames = ec->tx_max_coalesced_frames; - priv->tx_coal_timer = ec->tx_coalesce_usecs; - priv->rx_riwt = rx_riwt; - stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); + return 0; +} + +static u32 stmmac_get_rxfh_key_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return sizeof(priv->rss.key); +} + +static u32 stmmac_get_rxfh_indir_size(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return ARRAY_SIZE(priv->rss.table); +} + +static int stmmac_get_rxfh(struct net_device *dev, + struct ethtool_rxfh_param *rxfh) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if (rxfh->indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + rxfh->indir[i] = priv->rss.table[i]; + } + + if (rxfh->key) + memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key)); + rxfh->hfunc = ETH_RSS_HASH_TOP; return 0; } +static int stmmac_set_rxfh(struct net_device *dev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i; + + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (rxfh->indir) { + for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) + priv->rss.table[i] = rxfh->indir[i]; + } + + if (rxfh->key) + memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key)); + + return stmmac_rss_configure(priv, priv->hw, &priv->rss, + priv->plat->rx_queues_to_use); +} + +static void stmmac_get_channels(struct net_device *dev, + struct ethtool_channels *chan) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + chan->rx_count = priv->plat->rx_queues_to_use; + chan->tx_count = priv->plat->tx_queues_to_use; + chan->max_rx = priv->dma_cap.number_rx_queues; + chan->max_tx = priv->dma_cap.number_tx_queues; +} + +static int stmmac_set_channels(struct net_device *dev, + struct ethtool_channels *chan) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (chan->rx_count > priv->dma_cap.number_rx_queues || + chan->tx_count > priv->dma_cap.number_tx_queues || + !chan->rx_count || !chan->tx_count) + return -EINVAL; + + return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count); +} + static int stmmac_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct stmmac_priv *priv = netdev_priv(dev); @@ -808,13 +1018,13 @@ static int stmmac_get_ts_info(struct net_device *dev, info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (priv->ptp_clock) info->phc_index = ptp_clock_index(priv->ptp_clock); + else + info->phc_index = 0; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -834,54 +1044,76 @@ static int stmmac_get_ts_info(struct net_device *dev, return ethtool_op_get_ts_info(dev, info); } -static int stmmac_get_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, void *data) +static int stmmac_get_mm(struct net_device *ndev, + struct ethtool_mm_state *state) { - struct stmmac_priv *priv = netdev_priv(dev); - int ret = 0; + struct stmmac_priv *priv = netdev_priv(ndev); + u32 frag_size; - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - *(u32 *)data = priv->rx_copybreak; - break; - default: - ret = -EINVAL; - break; - } + if (!stmmac_fpe_supported(priv)) + return -EOPNOTSUPP; + + state->rx_min_frag_size = ETH_ZLEN; + frag_size = 
stmmac_fpe_get_add_frag_size(priv); + state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size); + + ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state); - return ret; + return 0; } -static int stmmac_set_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, - const void *data) +static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg, + struct netlink_ext_ack *extack) { - struct stmmac_priv *priv = netdev_priv(dev); - int ret = 0; + struct stmmac_priv *priv = netdev_priv(ndev); + u32 frag_size; + int err; - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - priv->rx_copybreak = *(u32 *)data; - break; - default: - ret = -EINVAL; - break; - } + err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, + &frag_size, extack); + if (err) + return err; + + stmmac_fpe_set_add_frag_size(priv, frag_size); + ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg); + + return 0; +} + +static void stmmac_get_mm_stats(struct net_device *ndev, + struct ethtool_mm_stats *s) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_counters *mmc = &priv->mmc; + + if (!priv->dma_cap.rmon) + return; + + stmmac_mmc_read(priv, priv->mmcaddr, mmc); - return ret; + s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr; + s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr; + s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr; + s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr; + s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr; + s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr; } static const struct ethtool_ops stmmac_ethtool_ops = { - .begin = stmmac_check_if_running, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = stmmac_ethtool_getdrvinfo, .get_msglevel = stmmac_ethtool_getmsglevel, .set_msglevel = stmmac_ethtool_setmsglevel, .get_regs = stmmac_ethtool_gregs, .get_regs_len = stmmac_ethtool_get_regs_len, .get_link = ethtool_op_get_link, - .nway_reset = phy_ethtool_nway_reset, + .nway_reset = stmmac_nway_reset, + .get_ringparam = stmmac_get_ringparam, + .set_ringparam = stmmac_set_ringparam, .get_pauseparam = stmmac_get_pauseparam, .set_pauseparam = stmmac_set_pauseparam, + .self_test = stmmac_selftest_run, .get_ethtool_stats = stmmac_get_ethtool_stats, .get_strings = stmmac_get_strings, .get_wol = stmmac_get_wol, @@ -889,13 +1121,23 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_eee = stmmac_ethtool_op_get_eee, .set_eee = stmmac_ethtool_op_set_eee, .get_sset_count = stmmac_get_sset_count, + .get_rxnfc = stmmac_get_rxnfc, + .get_rxfh_key_size = stmmac_get_rxfh_key_size, + .get_rxfh_indir_size = stmmac_get_rxfh_indir_size, + .get_rxfh = stmmac_get_rxfh, + .set_rxfh = stmmac_set_rxfh, .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, - .get_tunable = stmmac_get_tunable, - .set_tunable = stmmac_set_tunable, + .get_per_queue_coalesce = stmmac_get_per_queue_coalesce, + .set_per_queue_coalesce = stmmac_set_per_queue_coalesce, + .get_channels = stmmac_get_channels, + .set_channels = stmmac_set_channels, .get_link_ksettings = stmmac_ethtool_get_link_ksettings, .set_link_ksettings = stmmac_ethtool_set_link_ksettings, + .get_mm = stmmac_get_mm, + .set_mm = stmmac_set_mm, + .get_mm_stats = stmmac_get_mm_stats, }; void stmmac_set_ethtool_ops(struct net_device *netdev) |
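
Note on the per-queue statistics added above: the new stmmac_get_rx_normal_irq_n()/stmmac_get_tx_normal_irq_n() helpers sum per-CPU counters under the u64_stats seqcount API instead of taking a lock while ethtool reads them. Below is a minimal sketch of that reader/writer pattern; the names (demo_pcpu_stats, demo_count_packet, demo_read_total) are illustrative only and are not part of the driver.

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

/* writer: bumps the local CPU's counter in a hot path */
static void demo_count_packet(struct demo_pcpu_stats __percpu *stats)
{
	struct demo_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* reader: takes a consistent snapshot from every possible CPU and sums it,
 * the same loop shape used by the new IRQ counter helpers above
 */
static u64 demo_read_total(struct demo_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			val = u64_stats_read(&s->packets);
		} while (u64_stats_fetch_retry(&s->syncp, start));
		total += val;
	}
	return total;
}

On 64-bit builds the fetch_begin/fetch_retry pair compiles away; on 32-bit it keeps a reader from observing a torn 64-bit counter update.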

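The coalesce rework also touches how the RX watchdog value is derived: stmmac_usec2riwt()/stmmac_riwt2usec() convert between microseconds and the RIWT register, which counts in units of 256 CSR clock cycles, and now fall back to plat->clk_ref_rate when the stmmac_clk rate reads back as zero. A small standalone sketch of that arithmetic; the 250 MHz CSR clock is only an assumed example rate.

static unsigned int demo_usec2riwt(unsigned int usec, unsigned long csr_clk_hz)
{
	/* the watchdog timer ticks once every 256 CSR clock cycles */
	return (usec * (csr_clk_hz / 1000000)) / 256;
}

static unsigned int demo_riwt2usec(unsigned int riwt, unsigned long csr_clk_hz)
{
	return (riwt * 256) / (csr_clk_hz / 1000000);
}

/* with csr_clk_hz = 250000000: 100 us -> riwt 97, which reads back as 99 us,
 * i.e. the granularity is 256 / 250 MHz ~= 1.02 us per RIWT step
 */

With the get/set_per_queue_coalesce hooks added here, the setting can also be applied to individual queues from userspace, e.g. something like "ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 100" (assuming an ethtool build with --per-queue support and that eth0 is the stmmac interface).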