Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_ethtool.c')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 950
1 file changed, 826 insertions(+), 124 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 600ab56c0008..a14d0ad978e1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
 /*
  * Linux driver for VMware's vmxnet3 ethernet NIC.
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -20,12 +20,17 @@
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
  *
  */
 
 #include "vmxnet3_int.h"
+#include <net/vxlan.h>
+#include <net/geneve.h>
+#include "vmxnet3_xdp.h"
+
+#define VXLAN_UDP_PORT 8472
 
 struct vmxnet3_stat_desc {
 	char desc[ETH_GSTRING_LEN];
@@ -72,6 +77,10 @@ vmxnet3_tq_driver_stats[] = {
 						  copy_skb_header) },
 	{ " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
 				 oversized_hdr) },
+	{ " xdp xmit", offsetof(struct vmxnet3_tq_driver_stats,
+				xdp_xmit) },
+	{ " xdp xmit err", offsetof(struct vmxnet3_tq_driver_stats,
+				    xdp_xmit_err) },
 };
 
 /* per rq stats maintained by the device */
@@ -102,9 +111,19 @@ vmxnet3_rq_driver_stats[] = {
 					    drop_fcs) },
 	{ " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
 					 rx_buf_alloc_failure) },
+	{ " xdp packets", offsetof(struct vmxnet3_rq_driver_stats,
+				   xdp_packets) },
+	{ " xdp tx", offsetof(struct vmxnet3_rq_driver_stats,
+			      xdp_tx) },
+	{ " xdp redirects", offsetof(struct vmxnet3_rq_driver_stats,
+				     xdp_redirects) },
+	{ " xdp drops", offsetof(struct vmxnet3_rq_driver_stats,
+				 xdp_drops) },
+	{ " xdp aborted", offsetof(struct vmxnet3_rq_driver_stats,
+				   xdp_aborted) },
 };
 
-/* gloabl stats maintained by the driver */
+/* global stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
 	/* description, offset */
@@ -113,7 +132,7 @@ vmxnet3_global_stats[] = {
 };
 
 
-struct rtnl_link_stats64 *
+void
 vmxnet3_get_stats64(struct net_device *netdev,
 		    struct rtnl_link_stats64 *stats)
 {
@@ -160,8 +179,6 @@ vmxnet3_get_stats64(struct net_device *netdev,
 		stats->rx_dropped += drvRxStats->drop_total;
 		stats->multicast += devRxStats->mcastPktsRxOK;
 	}
-
-	return stats;
 }
 
 static int
@@ -183,16 +200,22 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 }
 
 
-/* Should be multiple of 4 */
-#define NUM_TX_REGS	8
-#define NUM_RX_REGS	12
-
+/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
+ * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
+ * Therefore, if any registers are added, removed or modified, then a version
+ * bump and a corresponding change in the vmxnet3 support for ethtool(8)
+ * --register-dump would be required.
+ */
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
-		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
+
+	return ((9 /* BAR1 registers */ +
+		(1 + adapter->intr.num_intrs) +
+		(1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
+		(1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
+		sizeof(u32));
 }
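As a quick sanity check of the new length formula, here is a standalone illustration; the queue and vector counts are made-up example values, not anything taken from the driver:

	/* Hypothetical example: dump size for an adapter with 4 Tx queues,
	 * 4 Rx queues and 9 interrupt vectors.
	 */
	unsigned int num_intrs = 9, num_tx = 4, num_rx = 4;
	unsigned int words = 9 +		/* BAR1 registers */
			     (1 + num_intrs) +	/* vector count + one IMR each */
			     (1 + num_tx * 17) +	/* Tx queue count + registers */
			     (1 + num_rx * 23);	/* Rx queue count + registers */
	/* words == 181, so vmxnet3_get_regs_len() would report 724 bytes */
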
@@ -201,70 +224,204 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
-	strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
+	strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
 
-	strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
+	strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
 		sizeof(drvinfo->version));
 
-	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
-	drvinfo->testinfo_len = 0;
-	drvinfo->eedump_len = 0;
-	drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
 }
 
 
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
-	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	if (stringset == ETH_SS_STATS) {
-		int i, j;
-		for (j = 0; j < adapter->num_tx_queues; j++) {
-			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-				       ETH_GSTRING_LEN);
-				buf += ETH_GSTRING_LEN;
-			}
-			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
-			     i++) {
-				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-				       ETH_GSTRING_LEN);
-				buf += ETH_GSTRING_LEN;
-			}
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	int i, j;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+			ethtool_puts(&buf, vmxnet3_tq_dev_stats[i].desc);
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+			ethtool_puts(&buf, vmxnet3_tq_driver_stats[i].desc);
+	}
+
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+			ethtool_puts(&buf, vmxnet3_rq_dev_stats[i].desc);
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+			ethtool_puts(&buf, vmxnet3_rq_driver_stats[i].desc);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
+		ethtool_puts(&buf, vmxnet3_global_stats[i].desc);
+}
+
+netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
+				       netdev_features_t features)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	/* If Rx checksum is disabled, then LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	/* If XDP is enabled, then LRO should not be enabled */
+	if (vmxnet3_xdp_enabled(adapter) && (features & NETIF_F_LRO)) {
+		netdev_err(netdev, "LRO is not supported with XDP");
+		features &= ~NETIF_F_LRO;
+	}
+
+	return features;
+}
+
+netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
+					 struct net_device *netdev,
+					 netdev_features_t features)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	/* Validate if the tunneled packet is being offloaded by the device */
+	if (VMXNET3_VERSION_GE_4(adapter) &&
+	    skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 l4_proto = 0;
+		u16 port;
+		struct udphdr *udph;
+
+		switch (vlan_get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			l4_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			l4_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 		}
-		for (j = 0; j < adapter->num_rx_queues; j++) {
-			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-				       ETH_GSTRING_LEN);
-				buf += ETH_GSTRING_LEN;
-			}
-			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
-			     i++) {
-				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-				       ETH_GSTRING_LEN);
-				buf += ETH_GSTRING_LEN;
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			udph = udp_hdr(skb);
+			port = be16_to_cpu(udph->dest);
+			/* Check if offloaded port is supported */
+			if (port != GENEVE_UDP_PORT &&
+			    port != IANA_VXLAN_UDP_PORT &&
+			    port != VXLAN_UDP_PORT) {
+				return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 			}
+			break;
+		default:
+			return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 		}
+	}
+	return features;
+}
+
+static void vmxnet3_enable_encap_offloads(struct net_device *netdev, netdev_features_t features)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
-			memcpy(buf, vmxnet3_global_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
+	if (VMXNET3_VERSION_GE_4(adapter)) {
+		netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
+			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+			NETIF_F_LRO;
+		if (features & NETIF_F_GSO_UDP_TUNNEL)
+			netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+			netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
+	if (VMXNET3_VERSION_GE_7(adapter)) {
+		unsigned long flags;
+
+		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+					       VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
+			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
+		}
+		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+					       VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
+			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
+		}
+		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+					       VMXNET3_CAP_GENEVE_TSO)) {
+			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
+		}
+		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+					       VMXNET3_CAP_VXLAN_TSO)) {
+			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
+		}
+		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+					       VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
+			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
+		}
+		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+					       VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
+			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
+		}
+
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
+		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
+		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
+		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
+			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+		}
+		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
+		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
+			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		}
 	}
 }
 
+static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	if (VMXNET3_VERSION_GE_4(adapter)) {
+		netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
+			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+			NETIF_F_GSO_UDP_TUNNEL_CSUM);
+	}
+	if (VMXNET3_VERSION_GE_7(adapter)) {
+		unsigned long flags;
+
+		adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD |
+					  1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD |
+					  1UL << VMXNET3_CAP_GENEVE_TSO |
+					  1UL << VMXNET3_CAP_VXLAN_TSO |
+					  1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD |
+					  1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD);
+
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
+		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+	}
+}
+
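Both helpers above end with the same device-capability handshake: write the requested capability bits to the DCR register, then issue GET_DCR0_REG and cache what the device actually accepted. Condensed into a standalone sketch for readability (vmxnet3_sync_dev_caps is a hypothetical name, not a function in the driver):

	static void vmxnet3_sync_dev_caps(struct vmxnet3_adapter *adapter)
	{
		unsigned long flags;

		/* request the capability bits computed so far */
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
		/* ask the device which of them it accepted, and cache that */
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_DCR0_REG);
		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}
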
 int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	unsigned long flags;
 	netdev_features_t changed = features ^ netdev->features;
+	netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
+					     NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;
 
 	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
-		       NETIF_F_HW_VLAN_CTAG_RX)) {
+		       NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
 		if (features & NETIF_F_RXCSUM)
 			adapter->shared->devRead.misc.uptFeatures |=
 			UPT1_F_RXCSUM;
@@ -272,7 +429,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 			adapter->shared->devRead.misc.uptFeatures &=
 			~UPT1_F_RXCSUM;
 
-		/* update harware LRO capability accordingly */
+		/* update hardware LRO capability accordingly */
 		if (features & NETIF_F_LRO)
 			adapter->shared->devRead.misc.uptFeatures |=
 				UPT1_F_LRO;
@@ -287,6 +444,17 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 			adapter->shared->devRead.misc.uptFeatures &=
 				~UPT1_F_RXVLAN;
 
+		if ((features & tun_offload_mask) != 0) {
+			vmxnet3_enable_encap_offloads(netdev, features);
+			adapter->shared->devRead.misc.uptFeatures |=
+			UPT1_F_RXINNEROFLD;
+		} else if ((features & tun_offload_mask) == 0 &&
+			   udp_tun_enabled) {
+			vmxnet3_disable_encap_offloads(netdev);
+			adapter->shared->devRead.misc.uptFeatures &=
+			~UPT1_F_RXINNEROFLD;
+		}
+
 		spin_lock_irqsave(&adapter->cmd_lock, flags);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
@@ -323,7 +491,7 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
 				vmxnet3_tq_driver_stats[i].offset);
 	}
 
-	for (j = 0; j < adapter->num_tx_queues; j++) {
+	for (j = 0; j < adapter->num_rx_queues; j++) {
 		base = (u8 *)&adapter->rqd_start[j].stats;
 		*buf++ = (u64) j;
 		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
@@ -342,6 +510,12 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
 }
 
 
+/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
+ * the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
+ * Therefore, if any registers are added, removed or modified, then a version
+ * bump and a corresponding change in the vmxnet3 support for ethtool(8)
+ * --register-dump would be required.
+ */
 static void
 vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 {
@@ -351,40 +525,88 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 
 	memset(p, 0, vmxnet3_get_regs_len(netdev));
 
-	regs->version = 1;
+	regs->version = 2;
 
 	/* Update vmxnet3_get_regs_len if we want to dump more registers */
 
-	/* make each ring use multiple of 16 bytes */
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
-		buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
-		buf[j++] = adapter->tx_queue[i].tx_ring.gen;
-		buf[j++] = 0;
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
+	buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);
+
+	buf[j++] = adapter->intr.num_intrs;
+	for (i = 0; i < adapter->intr.num_intrs; i++) {
+		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR +
+						 i * VMXNET3_REG_ALIGN);
+	}
 
-		buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
-		buf[j++] = adapter->tx_queue[i].comp_ring.gen;
-		buf[j++] = adapter->tx_queue[i].stopped;
-		buf[j++] = 0;
+	buf[j++] = adapter->num_tx_queues;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+
+		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset +
+						 i * VMXNET3_REG_ALIGN);
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
+		buf[j++] = tq->tx_ring.size;
+		buf[j++] = tq->tx_ring.next2fill;
+		buf[j++] = tq->tx_ring.next2comp;
+		buf[j++] = tq->tx_ring.gen;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
+		buf[j++] = tq->data_ring.size;
+		buf[j++] = tq->txdata_desc_size;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
+		buf[j++] = tq->comp_ring.size;
+		buf[j++] = tq->comp_ring.next2proc;
+		buf[j++] = tq->comp_ring.gen;
+
+		buf[j++] = tq->stopped;
 	}
 
+	buf[j++] = adapter->num_rx_queues;
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
-		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
-		buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
-		buf[j++] = 0;
-
-		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
-		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
-		buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
-		buf[j++] = 0;
-
-		buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
-		buf[j++] = adapter->rx_queue[i].comp_ring.gen;
-		buf[j++] = 0;
-		buf[j++] = 0;
+		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset +
+						 i * VMXNET3_REG_ALIGN);
+		buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset +
+						 i * VMXNET3_REG_ALIGN);
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
+		buf[j++] = rq->rx_ring[0].size;
+		buf[j++] = rq->rx_ring[0].next2fill;
+		buf[j++] = rq->rx_ring[0].next2comp;
+		buf[j++] = rq->rx_ring[0].gen;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
+		buf[j++] = rq->rx_ring[1].size;
+		buf[j++] = rq->rx_ring[1].next2fill;
+		buf[j++] = rq->rx_ring[1].next2comp;
+		buf[j++] = rq->rx_ring[1].gen;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
+		buf[j++] = rq->rx_ring[0].size;
+		buf[j++] = rq->data_ring.desc_size;
+
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
+		buf[j++] = rq->comp_ring.size;
+		buf[j++] = rq->comp_ring.next2proc;
+		buf[j++] = rq->comp_ring.gen;
 	}
-
 }
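Putting the new vmxnet3_get_regs() together, the version 2 dump is laid out as below. This is a reading aid derived from the code above, not a separate ABI document:

	/*
	 * Version 2 register dump, in 32-bit words:
	 *   9 words     BAR1 registers: VRRS, UVRS, DSAL, DSAH, CMD, MACL,
	 *               MACH, ICR, ECR
	 *   1 + N       interrupt count, then one IMR per vector
	 *   1 + 17 * T  Tx queue count, then per queue: producer register;
	 *               tx ring PA lo/hi, size, next2fill, next2comp, gen;
	 *               data ring PA lo/hi, size, desc size;
	 *               comp ring PA lo/hi, size, next2proc, gen; stopped
	 *   1 + 23 * R  Rx queue count, then per queue: two producer
	 *               registers; ring 0 and ring 1 PA lo/hi, size,
	 *               next2fill, next2comp, gen; data ring PA lo/hi,
	 *               size, desc size; comp ring PA lo/hi, size,
	 *               next2proc, gen
	 */
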
@@ -417,51 +639,59 @@ vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
 
 static int
-vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+vmxnet3_get_link_ksettings(struct net_device *netdev,
+			   struct ethtool_link_ksettings *ecmd)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
-	ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
-			  SUPPORTED_TP;
-	ecmd->advertising = ADVERTISED_TP;
-	ecmd->port = PORT_TP;
-	ecmd->transceiver = XCVR_INTERNAL;
+	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+	ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
+	ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
+	ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+	ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
+	ecmd->base.port = PORT_TP;
 
 	if (adapter->link_speed) {
-		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
-		ecmd->duplex = DUPLEX_FULL;
+		ecmd->base.speed = adapter->link_speed;
+		ecmd->base.duplex = DUPLEX_FULL;
 	} else {
-		ethtool_cmd_speed_set(ecmd, -1);
-		ecmd->duplex = -1;
+		ecmd->base.speed = SPEED_UNKNOWN;
+		ecmd->base.duplex = DUPLEX_UNKNOWN;
 	}
 	return 0;
 }
 
-
 static void
 vmxnet3_get_ringparam(struct net_device *netdev,
-		      struct ethtool_ringparam *param)
+		      struct ethtool_ringparam *param,
+		      struct kernel_ethtool_ringparam *kernel_param,
+		      struct netlink_ext_ack *extack)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
 	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
 	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
-	param->rx_mini_max_pending = 0;
-	param->rx_jumbo_max_pending = 0;
-
-	param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
-	param->tx_pending = adapter->tx_queue[0].tx_ring.size;
-	param->rx_mini_pending = 0;
-	param->rx_jumbo_pending = 0;
+	param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
+		VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
+	param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
+
+	param->rx_pending = adapter->rx_ring_size;
+	param->tx_pending = adapter->tx_ring_size;
+	param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
+		adapter->rxdata_desc_size : 0;
+	param->rx_jumbo_pending = adapter->rx_ring2_size;
 }
 
-
 static int
 vmxnet3_set_ringparam(struct net_device *netdev,
-		      struct ethtool_ringparam *param)
+		      struct ethtool_ringparam *param,
+		      struct kernel_ethtool_ringparam *kernel_param,
+		      struct netlink_ext_ack *extack)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	u32 new_tx_ring_size, new_rx_ring_size;
+	u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
+	u16 new_rxdata_desc_size;
 	u32 sz;
 	int err = 0;
@@ -473,6 +703,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 	    VMXNET3_RX_RING_MAX_SIZE)
 		return -EINVAL;
 
+	if (param->rx_jumbo_pending == 0 ||
+	    param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
+		return -EINVAL;
+
 	/* if adapter not yet initialized, do nothing */
 	if (adapter->rx_buf_per_pkt == 0) {
 		netdev_err(netdev, "adapter not completely initialized, "
@@ -480,6 +714,13 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 
+	if (VMXNET3_VERSION_GE_3(adapter)) {
+		if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
+			return -EINVAL;
+	} else if (param->rx_mini_pending != 0) {
+		return -EINVAL;
+	}
+
 	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
 	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
 							~VMXNET3_RING_SIZE_MASK;
@@ -500,8 +741,32 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 		   sz) != 0)
 		return -EINVAL;
 
-	if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
-	    new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
+	/* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
+	new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
+			    ~VMXNET3_RING_SIZE_MASK;
+	new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
+				  VMXNET3_RX_RING2_MAX_SIZE);
+
+	/* For v7 and later, keep ring size power of 2 for UPT */
+	if (VMXNET3_VERSION_GE_7(adapter)) {
+		new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
+		new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
+		new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
+	}
+
+	/* rx data ring buffer size has to be a multiple of
+	 * VMXNET3_RXDATA_DESC_SIZE_ALIGN
+	 */
+	new_rxdata_desc_size =
+		(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
+		~VMXNET3_RXDATA_DESC_SIZE_MASK;
+	new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
+				     VMXNET3_RXDATA_DESC_MAX_SIZE);
+
+	if (new_tx_ring_size == adapter->tx_ring_size &&
+	    new_rx_ring_size == adapter->rx_ring_size &&
+	    new_rx_ring2_size == adapter->rx_ring2_size &&
+	    new_rxdata_desc_size == adapter->rxdata_desc_size) {
 		return 0;
 	}
 
@@ -510,7 +775,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 	 * completion.
 	 */
 	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (netif_running(netdev)) {
 		vmxnet3_quiesce_dev(adapter);
@@ -522,17 +787,26 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 		vmxnet3_rq_destroy_all(adapter);
 
 		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
-					    new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
-
+					    new_rx_ring_size, new_rx_ring2_size,
+					    adapter->txdata_desc_size,
+					    new_rxdata_desc_size);
 		if (err) {
 			/* failed, most likely because of OOM, try default
 			 * size */
 			netdev_err(netdev, "failed to apply new sizes, "
 				   "try the default ones\n");
+			new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+			new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+			new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+			new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+				VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
 			err = vmxnet3_create_queues(adapter,
-						    VMXNET3_DEF_TX_RING_SIZE,
-						    VMXNET3_DEF_RX_RING_SIZE,
-						    VMXNET3_DEF_RX_RING_SIZE);
+						    new_tx_ring_size,
+						    new_rx_ring_size,
+						    new_rx_ring2_size,
+						    adapter->txdata_desc_size,
+						    new_rxdata_desc_size);
 			if (err) {
 				netdev_err(netdev, "failed to create queues "
 					   "with default sizes. Closing it\n");
@@ -545,6 +819,10 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 			netdev_err(netdev, "failed to re-activate, error %d."
 				   " Closing it\n", err);
 	}
+	adapter->tx_ring_size = new_tx_ring_size;
+	adapter->rx_ring_size = new_rx_ring_size;
+	adapter->rx_ring2_size = new_rx_ring2_size;
+	adapter->rxdata_desc_size = new_rxdata_desc_size;
 
 out:
 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -554,18 +832,260 @@ out:
 	return err;
 }
 
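A small worked example of the ring-size rounding done above, assuming VMXNET3_RING_SIZE_ALIGN is 32 (so VMXNET3_RING_SIZE_MASK is 31) as defined in vmxnet3_int.h; treat the exact constants as an assumption here:

	u32 requested = 500;
	u32 rounded = (requested + 31) & ~31u;		/* -> 512 */
	/* On version 7 and later devices the size is additionally
	 * clamped down to a power of two for UPT:
	 */
	u32 v7_size = rounddown_pow_of_two(rounded);	/* 512 == 2^9, unchanged */
	/* but e.g. a requested 420 rounds up to 448, then down to 256 */
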
+static int
+vmxnet3_get_rss_hash_opts(struct net_device *netdev,
+			  struct ethtool_rxfh_fields *info)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	enum Vmxnet3_RSSField rss_fields;
+
+	if (!VMXNET3_VERSION_GE_4(adapter))
+		return -EOPNOTSUPP;
+#ifdef VMXNET3_RSS
+	if (!adapter->rss)
+		return -EOPNOTSUPP;
+#endif
+
+	if (netif_running(adapter->netdev)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
+
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+				       VMXNET3_CMD_GET_RSS_FIELDS);
+		rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+	} else {
+		rss_fields = adapter->rss_fields;
+	}
+
+	info->data = 0;
+
+	/* Report default options for RSS on vmxnet3 */
+	switch (info->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+			      RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case UDP_V4_FLOW:
+		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		info->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case SCTP_V4_FLOW:
+	case IPV4_FLOW:
+		info->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case UDP_V6_FLOW:
+		if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		info->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		if (VMXNET3_VERSION_GE_6(adapter) &&
+		    (rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
+			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case SCTP_V6_FLOW:
+	case IPV6_FLOW:
+		info->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
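The RXH_* bits above are the ones ethtool(8) exchanges for rx-flow-hash settings; for example, "ethtool -N ethX rx-flow-hash udp4 sdfn" (ethX is a placeholder) requests hashing on source/destination IP plus both L4 port halves, i.e. (names from the ethtool UAPI):

	u64 four_tuple = RXH_IP_SRC | RXH_IP_DST	/* s, d: IP addresses */
		       | RXH_L4_B_0_1 | RXH_L4_B_2_3;	/* f, n: L4 port bytes */

which, on vmxnet3, maps to setting VMXNET3_RSS_FIELDS_UDPIP4 in the handler that follows.
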
 static int
-vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
-		  u32 *rules)
+vmxnet3_set_rss_hash_opt(struct net_device *netdev,
+			 const struct ethtool_rxfh_fields *nfc,
+			 struct netlink_ext_ack *extack)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	switch (info->cmd) {
-	case ETHTOOL_GRXRINGS:
-		info->data = adapter->num_rx_queues;
-		return 0;
+	enum Vmxnet3_RSSField rss_fields;
+
+	if (!VMXNET3_VERSION_GE_4(adapter))
+		return -EOPNOTSUPP;
+#ifdef VMXNET3_RSS
+	if (!adapter->rss)
+		return -EOPNOTSUPP;
+#endif
+
+	rss_fields = adapter->rss_fields;
+
+	/* RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    !(nfc->data & RXH_L4_B_0_1) ||
+		    !(nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	case UDP_V4_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+		if (!VMXNET3_VERSION_GE_6(adapter))
+			return -EOPNOTSUPP;
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case SCTP_V4_FLOW:
+	case SCTP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    (nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* if we changed something we need to update flags */
+	if (rss_fields != adapter->rss_fields) {
+		adapter->default_rss_fields = false;
+		if (netif_running(netdev)) {
+			struct Vmxnet3_DriverShared *shared = adapter->shared;
+			union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+			unsigned long flags;
+
+			if (VMXNET3_VERSION_GE_7(adapter)) {
+				if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
+				     rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
+				    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+							       VMXNET3_CAP_UDP_RSS)) {
+					adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
+				} else {
+					adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
+				}
+				if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
+				    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+							       VMXNET3_CAP_ESP_RSS_IPV4)) {
+					adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
+				} else {
+					adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
+				}
+				if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
+				    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
+							       VMXNET3_CAP_ESP_RSS_IPV6)) {
+					adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
+				} else {
+					adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
+				}
+
+				VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
+						       adapter->dev_caps[0]);
+				spin_lock_irqsave(&adapter->cmd_lock, flags);
+				VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+						       VMXNET3_CMD_GET_DCR0_REG);
+				adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter,
+									     VMXNET3_REG_CMD);
+				spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+			}
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
+			cmdInfo->setRssFields = rss_fields;
+			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+					       VMXNET3_CMD_SET_RSS_FIELDS);
+
+			/* Not all requested RSS may get applied, so get and
+			 * cache what was actually applied.
+			 */
+			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+					       VMXNET3_CMD_GET_RSS_FIELDS);
+			adapter->rss_fields =
+				VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+		} else {
+			/* When the device is activated, we will try to apply
+			 * these rules and cache the applied value later.
+			 */
+			adapter->rss_fields = rss_fields;
+		}
 	}
-	return -EOPNOTSUPP;
+	return 0;
+}
+
+static u32 vmxnet3_get_rx_ring_count(struct net_device *netdev)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->num_rx_queues;
 }
 
 #ifdef VMXNET3_RSS
@@ -579,28 +1099,41 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
 }
 
 static int
-vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
+vmxnet3_get_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 	unsigned int n = rssConf->indTableSize;
 
+	rxfh->hfunc = ETH_RSS_HASH_TOP;
+	if (!rxfh->indir)
+		return 0;
+	if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
+		return 0;
 	while (n--)
-		p[n] = rssConf->indTable[n];
+		rxfh->indir[n] = rssConf->indTable[n];
 	return 0;
 }
 
 static int
-vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
+vmxnet3_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+		struct netlink_ext_ack *extack)
 {
 	unsigned int i;
 	unsigned long flags;
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
+	/* We do not allow change in unsupported parameters */
+	if (rxfh->key ||
+	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	     rxfh->hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+	if (!rxfh->indir)
+		return 0;
 	for (i = 0; i < rssConf->indTableSize; i++)
-		rssConf->indTable[i] = p[i];
+		rssConf->indTable[i] = rxfh->indir[i];
 
 	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -612,28 +1145,197 @@ vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
 }
 #endif
 
+static int vmxnet3_get_coalesce(struct net_device *netdev,
+				struct ethtool_coalesce *ec,
+				struct kernel_ethtool_coalesce *kernel_coal,
+				struct netlink_ext_ack *extack)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	if (!VMXNET3_VERSION_GE_3(adapter))
+		return -EOPNOTSUPP;
+
+	switch (adapter->coal_conf->coalMode) {
+	case VMXNET3_COALESCE_DISABLED:
+		/* struct ethtool_coalesce is already initialized to 0 */
+		break;
+	case VMXNET3_COALESCE_ADAPT:
+		ec->use_adaptive_rx_coalesce = true;
+		break;
+	case VMXNET3_COALESCE_STATIC:
+		ec->tx_max_coalesced_frames =
+			adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
+		ec->rx_max_coalesced_frames =
+			adapter->coal_conf->coalPara.coalStatic.rx_depth;
+		break;
+	case VMXNET3_COALESCE_RBC: {
+		u32 rbc_rate;
+
+		rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
+		ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
+	}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
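The coalescing getter above and the setter below translate between the device's coalescing modes and ethtool's generic parameters. In summary (a reading aid, not driver code):

	/*
	 * coalMode                    ethtool parameter(s)
	 * VMXNET3_COALESCE_DISABLED   everything zero
	 * VMXNET3_COALESCE_ADAPT      use_adaptive_rx_coalesce
	 * VMXNET3_COALESCE_STATIC     tx_max_coalesced_frames / rx_max_coalesced_frames
	 * VMXNET3_COALESCE_RBC        rx_coalesce_usecs (converted to a rate)
	 */
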
+
+static int vmxnet3_set_coalesce(struct net_device *netdev,
+				struct ethtool_coalesce *ec,
+				struct kernel_ethtool_coalesce *kernel_coal,
+				struct netlink_ext_ack *extack)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	struct Vmxnet3_DriverShared *shared = adapter->shared;
+	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+	unsigned long flags;
+
+	if (!VMXNET3_VERSION_GE_3(adapter))
+		return -EOPNOTSUPP;
+
+	if ((ec->rx_coalesce_usecs == 0) &&
+	    (ec->use_adaptive_rx_coalesce == 0) &&
+	    (ec->tx_max_coalesced_frames == 0) &&
+	    (ec->rx_max_coalesced_frames == 0)) {
+		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+		adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+		goto done;
+	}
+
+	if (ec->rx_coalesce_usecs != 0) {
+		u32 rbc_rate;
+
+		if ((ec->use_adaptive_rx_coalesce != 0) ||
+		    (ec->tx_max_coalesced_frames != 0) ||
+		    (ec->rx_max_coalesced_frames != 0)) {
+			return -EINVAL;
+		}
+
+		rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
+		if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
+		    rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
+			return -EINVAL;
+		}
+
+		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+		adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
+		adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
+		goto done;
+	}
+
+	if (ec->use_adaptive_rx_coalesce != 0) {
+		if (ec->tx_max_coalesced_frames != 0 ||
+		    ec->rx_max_coalesced_frames != 0) {
+			return -EINVAL;
+		}
+		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+		adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
+		goto done;
+	}
+
+	if ((ec->tx_max_coalesced_frames != 0) ||
+	    (ec->rx_max_coalesced_frames != 0)) {
+		if ((ec->tx_max_coalesced_frames >
+		    VMXNET3_COAL_STATIC_MAX_DEPTH) ||
+		    (ec->rx_max_coalesced_frames >
+		    VMXNET3_COAL_STATIC_MAX_DEPTH)) {
+			return -EINVAL;
+		}
+
+		memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+		adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;
+
+		adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
+			(ec->tx_max_coalesced_frames ?
+			 ec->tx_max_coalesced_frames :
+			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+		adapter->coal_conf->coalPara.coalStatic.rx_depth =
+			(ec->rx_max_coalesced_frames ?
+			 ec->rx_max_coalesced_frames :
+			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+		adapter->coal_conf->coalPara.coalStatic.tx_depth =
+			 VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
+		goto done;
+	}
+
+done:
+	adapter->default_coal_mode = false;
+	if (netif_running(netdev)) {
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
+		cmdInfo->varConf.confVer = 1;
+		cmdInfo->varConf.confLen =
+			cpu_to_le32(sizeof(*adapter->coal_conf));
+		cmdInfo->varConf.confPA  = cpu_to_le64(adapter->coal_conf_pa);
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+				       VMXNET3_CMD_SET_COALESCE);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+	}
+
+	return 0;
+}
+
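For the rate-based (RBC) mode accepted above, the usecs value from ethtool is converted to an interrupt rate. A minimal sketch, assuming the conversion macros in vmxnet3_int.h divide one second by the interval (the exact definitions are an assumption here, suggested by the min/max rate bounds check):

	/* assumed to mirror vmxnet3_int.h: */
	#define VMXNET3_COAL_RBC_RATE(usecs)	(1000000 / (usecs))	/* events/s */
	#define VMXNET3_COAL_RBC_USECS(rate)	(1000000 / (rate))

	/* e.g. "ethtool -C ethX rx-usecs 250" requests a rate of
	 * VMXNET3_COAL_RBC_RATE(250) == 4000 interrupts per second, which
	 * must lie within [VMXNET3_COAL_RBC_MIN_RATE,
	 * VMXNET3_COAL_RBC_MAX_RATE] to be accepted.
	 */
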
+static void vmxnet3_get_channels(struct net_device *netdev,
+				 struct ethtool_channels *ec)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI) && adapter->intr.type == VMXNET3_IT_MSIX) {
+		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+			ec->combined_count = adapter->num_tx_queues;
+		} else {
+			ec->rx_count = adapter->num_rx_queues;
+			ec->tx_count =
+				adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+				1 : adapter->num_tx_queues;
+		}
+	} else {
+		ec->combined_count = 1;
+	}
+
+	ec->other_count = 1;
+
+	/* Number of interrupts cannot be changed on the fly */
+	/* Just set maximums to actual values */
+	ec->max_rx = ec->rx_count;
+	ec->max_tx = ec->tx_count;
+	ec->max_combined = ec->combined_count;
+	ec->max_other = ec->other_count;
+}
+
 static const struct ethtool_ops vmxnet3_ethtool_ops = {
-	.get_settings      = vmxnet3_get_settings,
+	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES |
+				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
 	.get_drvinfo       = vmxnet3_get_drvinfo,
 	.get_regs_len      = vmxnet3_get_regs_len,
 	.get_regs          = vmxnet3_get_regs,
 	.get_wol           = vmxnet3_get_wol,
 	.set_wol           = vmxnet3_set_wol,
 	.get_link          = ethtool_op_get_link,
+	.get_coalesce      = vmxnet3_get_coalesce,
+	.set_coalesce      = vmxnet3_set_coalesce,
 	.get_strings       = vmxnet3_get_strings,
 	.get_sset_count    = vmxnet3_get_sset_count,
 	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
 	.get_ringparam     = vmxnet3_get_ringparam,
 	.set_ringparam     = vmxnet3_set_ringparam,
-	.get_rxnfc         = vmxnet3_get_rxnfc,
+	.get_rx_ring_count = vmxnet3_get_rx_ring_count,
 #ifdef VMXNET3_RSS
 	.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
-	.get_rxfh_indir    = vmxnet3_get_rss_indir,
-	.set_rxfh_indir    = vmxnet3_set_rss_indir,
+	.get_rxfh          = vmxnet3_get_rss,
+	.set_rxfh          = vmxnet3_set_rss,
 #endif
+	.get_rxfh_fields   = vmxnet3_get_rss_hash_opts,
+	.set_rxfh_fields   = vmxnet3_set_rss_hash_opt,
+	.get_link_ksettings = vmxnet3_get_link_ksettings,
+	.get_channels      = vmxnet3_get_channels,
 };
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
+	netdev->ethtool_ops = &vmxnet3_ethtool_ops;
 }
