Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_ethtool.c')
 drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 1382 +++++++++++++++++------
 1 file changed, 1123 insertions(+), 259 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 9f87304109fe..2cc21289a707 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+#include <linux/uaccess.h>
+
+#include <net/netdev_lock.h>
+
/* ethtool support for iavf */
#include "iavf.h"
-#include <linux/uaccess.h>
-
/* ethtool statistics helpers */
/**
@@ -42,7 +45,7 @@ struct iavf_stats {
*/
#define IAVF_STAT(_type, _name, _stat) { \
.stat_string = _name, \
- .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .sizeof_stat = sizeof_field(_type, _stat), \
.stat_offset = offsetof(_type, _stat) \
}
@@ -147,7 +150,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
* @ring: the ring to copy
*
* Queue statistics must be copied while protected by
- * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.
+ * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
* Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
* ring pointer is null, zero out the queue stat values and update the data
* pointer. Otherwise safely copy the stats from the ring into the supplied
@@ -165,14 +168,14 @@ iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
/* To avoid invalid statistics values, ensure that we keep retrying
* the copy until we get a consistent value according to
- * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * u64_stats_fetch_retry. But first, make sure our ring is
* non-null before attempting to access its syncp.
*/
do {
- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
for (i = 0; i < size; i++)
iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
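
For context on the retry loop above: it pairs with the writer side of the
u64_stats seqcount API. A minimal sketch of how a ring's hot path would
publish counters under the same syncp (illustrative only; the stats field
names are assumptions, not taken from this patch):

	/* Writer side: bump counters inside an update section so the
	 * reader loop above can detect a torn 64-bit read and retry.
	 */
	static void iavf_ring_account_packet(struct iavf_ring *ring,
					     unsigned int bytes)
	{
		u64_stats_update_begin(&ring->syncp);
		ring->stats.packets++;		/* assumed field layout */
		ring->stats.bytes += bytes;
		u64_stats_update_end(&ring->syncp);
	}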
@@ -239,29 +242,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = {
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct iavf_priv_flags {
- char flag_string[ETH_GSTRING_LEN];
- u32 flag;
- bool read_only;
-};
-
-#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
- .flag_string = _name, \
- .flag = _flag, \
- .read_only = _read_only, \
-}
-
-static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
- IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
-};
-
-#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
-
/**
* iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -278,35 +258,46 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(cmd, supported);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = PORT_NONE;
- /* Set speed and duplex */
+ cmd->base.duplex = DUPLEX_FULL;
+
+ if (ADV_LINK_SUPPORT(adapter)) {
+ if (adapter->link_speed_mbps &&
+ adapter->link_speed_mbps < U32_MAX)
+ cmd->base.speed = adapter->link_speed_mbps;
+ else
+ cmd->base.speed = SPEED_UNKNOWN;
+
+ return 0;
+ }
+
switch (adapter->link_speed) {
- case I40E_LINK_SPEED_40GB:
+ case VIRTCHNL_LINK_SPEED_40GB:
cmd->base.speed = SPEED_40000;
break;
- case I40E_LINK_SPEED_25GB:
-#ifdef SPEED_25000
+ case VIRTCHNL_LINK_SPEED_25GB:
cmd->base.speed = SPEED_25000;
-#else
- netdev_info(netdev,
- "Speed is 25G, display not supported by this version of ethtool.\n");
-#endif
break;
- case I40E_LINK_SPEED_20GB:
+ case VIRTCHNL_LINK_SPEED_20GB:
cmd->base.speed = SPEED_20000;
break;
- case I40E_LINK_SPEED_10GB:
+ case VIRTCHNL_LINK_SPEED_10GB:
cmd->base.speed = SPEED_10000;
break;
- case I40E_LINK_SPEED_1GB:
+ case VIRTCHNL_LINK_SPEED_5GB:
+ cmd->base.speed = SPEED_5000;
+ break;
+ case VIRTCHNL_LINK_SPEED_2_5GB:
+ cmd->base.speed = SPEED_2500;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
cmd->base.speed = SPEED_1000;
break;
- case I40E_LINK_SPEED_100MB:
+ case VIRTCHNL_LINK_SPEED_100MB:
cmd->base.speed = SPEED_100;
break;
default:
break;
}
- cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -320,11 +311,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
**/
static int iavf_get_sset_count(struct net_device *netdev, int sset)
{
+ /* Report the maximum number of queues, even if not every queue is
+ * currently configured. Since allocation of queues is in pairs,
+ * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
+ * at device creation and never changes.
+ */
+
if (sset == ETH_SS_STATS)
return IAVF_STATS_LEN +
- (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return IAVF_PRIV_FLAGS_STR_LEN;
+ (IAVF_QUEUE_STATS_LEN * 2 *
+ netdev->real_num_tx_queues);
else
return -EINVAL;
}
@@ -343,44 +339,30 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
unsigned int i;
+ /* Explicitly request stats refresh */
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
+
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
- for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+ /* As num_active_queues describes both tx and rx queues, we can use
+ * it to iterate over rings' stats.
+ */
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct iavf_ring *ring;
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->tx_rings[i] : NULL);
+ /* Tx rings stats */
+ ring = &adapter->tx_rings[i];
iavf_add_queue_stats(&data, ring);
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->rx_rings[i] : NULL);
+ /* Rx rings stats */
+ ring = &adapter->rx_rings[i];
iavf_add_queue_stats(&data, ring);
}
rcu_read_unlock();
}
/**
- * iavf_get_priv_flag_strings - Get private flag strings
- * @netdev: network interface device structure
- * @data: buffer for string data
- *
- * Builds the private flags string table
- **/
-static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
-{
- unsigned int i;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- iavf_gstrings_priv_flags[i].flag_string);
- data += ETH_GSTRING_LEN;
- }
-}
-
-/**
* iavf_get_stat_strings - Get stat strings
* @netdev: network interface device structure
* @data: buffer for string data
@@ -393,10 +375,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
iavf_add_stat_strings(&data, iavf_gstrings_stats);
- /* Queues are always allocated in pairs, so we just use num_tx_queues
- * for both Tx and Rx queues.
+ /* Queues are always allocated in pairs, so we just use
+ * real_num_tx_queues for both Tx and Rx queues.
*/
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"tx", i);
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -418,106 +400,12 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
case ETH_SS_STATS:
iavf_get_stat_strings(netdev, data);
break;
- case ETH_SS_PRIV_FLAGS:
- iavf_get_priv_flag_strings(netdev, data);
- break;
default:
break;
}
}
/**
- * iavf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 iavf_get_priv_flags(struct net_device *netdev)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 i, ret_flags = 0;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (priv_flags->flag & adapter->flags)
- ret_flags |= BIT(i);
- }
-
- return ret_flags;
-}
-
-/**
- * iavf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 orig_flags, new_flags, changed_flags;
- u32 i;
-
- orig_flags = READ_ONCE(adapter->flags);
- new_flags = orig_flags;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
-
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
- return -EOPNOTSUPP;
- }
-
- /* Before we finalize any flag changes, any checks which we need to
- * perform to determine if the new flags will be supported should go
- * here...
- */
-
- /* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg returns anything but the old value, this means
- * something else must have modified the flags variable since we
- * copied it. We'll just punt with an error and log something in the
- * message buffer.
- */
- if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&adapter->pdev->dev,
- "Unable to update adapter->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
-
- changed_flags = orig_flags ^ new_flags;
-
- /* Process any additional changes needed as a result of flag changes.
- * The changed_flags value reflects the list of bits that were changed
- * in the code above.
- */
-
- /* issue a reset to force legacy-rx change to take effect */
- if (changed_flags & IAVF_FLAG_LEGACY_RX) {
- if (netif_running(netdev)) {
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
- }
- }
-
- return 0;
-}
-
-/**
* iavf_get_msglevel - Get debug message level
* @netdev: network interface device structure
*
@@ -559,23 +447,25 @@ static void iavf_get_drvinfo(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->version, iavf_driver_version, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
+ strscpy(drvinfo->driver, iavf_driver_name, 32);
+ strscpy(drvinfo->fw_version, "N/A", 4);
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
}
/**
* iavf_get_ringparam - Get ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Returns current ring parameters. TX and RX rings are reported separately,
* but the number of rings is not reported.
**/
static void iavf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -589,43 +479,71 @@ static void iavf_get_ringparam(struct net_device *netdev,
* iavf_set_ringparam - Set ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Sets ring parameters. TX and RX rings are controlled separately, but the
* number of rings is not specified, so all rings get the same settings.
**/
static int iavf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
+ int ret = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- IAVF_MIN_TXD,
- IAVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > IAVF_MAX_TXD ||
+ ring->tx_pending < IAVF_MIN_TXD ||
+ ring->rx_pending > IAVF_MAX_RXD ||
+ ring->rx_pending < IAVF_MIN_RXD) {
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+ ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+ IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ return -EINVAL;
+ }
+
+ new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_tx_count != ring->tx_pending)
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+ new_tx_count);
- new_rx_count = clamp_t(u32, ring->rx_pending,
- IAVF_MIN_RXD,
- IAVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_rx_count != ring->rx_pending)
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+ new_rx_count);
/* if nothing to do return success */
if ((new_tx_count == adapter->tx_desc_count) &&
- (new_rx_count == adapter->rx_desc_count))
+ (new_rx_count == adapter->rx_desc_count)) {
+ netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
+ }
- adapter->tx_desc_count = new_tx_count;
- adapter->rx_desc_count = new_rx_count;
+ if (new_tx_count != adapter->tx_desc_count) {
+ netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+ adapter->tx_desc_count, new_tx_count);
+ adapter->tx_desc_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_desc_count) {
+ netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+ adapter->rx_desc_count, new_rx_count);
+ adapter->rx_desc_count = new_rx_count;
+ }
if (netif_running(netdev)) {
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
+ iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+ ret = iavf_wait_for_reset(adapter);
+ if (ret)
+ netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
}
- return 0;
+ return ret;
}
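
To make the rounding above concrete, ALIGN() rounds up to the next multiple
of a power of two. A small worked sketch, assuming IAVF_REQ_DESCRIPTOR_MULTIPLE
is 32 (an assumption about the header value, not stated in this patch):

	/* ALIGN(x, a) for power-of-two a expands to ((x + a - 1) & ~(a - 1)):
	 *
	 *   ALIGN(500, 32) == 512   ->  "rounded up to 512" is logged
	 *   ALIGN(512, 32) == 512   ->  already aligned, no message
	 */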
/**
@@ -642,12 +560,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
struct iavf_ring *rx_ring, *tx_ring;
- ec->tx_max_coalesced_frames = vsi->work_limit;
- ec->rx_max_coalesced_frames = vsi->work_limit;
-
/* Rx and Tx usecs per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
*/
@@ -675,6 +589,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
* iavf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
*
* Returns current coalescing settings. This is referred to elsewhere in the
* driver as Interrupt Throttle Rate, as this is how the hardware describes
@@ -682,7 +598,9 @@ static int __iavf_get_coalesce(struct net_device *netdev,
* only represents the settings of queue 0.
**/
static int iavf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
return __iavf_get_coalesce(netdev, ec, -1);
}
@@ -709,12 +627,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
*
* Change the ITR settings for a specific queue.
**/
-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
- struct ethtool_coalesce *ec, int queue)
+static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
+ struct ethtool_coalesce *ec, int queue)
{
struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
struct iavf_q_vector *q_vector;
+ u16 itr_setting;
+
+ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ if (ec->rx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_rx_coalesce) {
+ netif_info(adapter, drv, adapter->netdev,
+ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+ return -EINVAL;
+ }
+
+ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ if (ec->tx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_tx_coalesce) {
+ netif_info(adapter, drv, adapter->netdev,
+ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+ return -EINVAL;
+ }
rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
@@ -737,6 +674,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
* the Tx and Rx ITR values based on the values we have entered
* into the q_vector, no need to write the values now.
*/
+ return 0;
}
/**
@@ -751,24 +689,12 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
- vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
- if (ec->rx_coalesce_usecs == 0) {
- if (ec->use_adaptive_rx_coalesce)
- netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
+ if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
- } else if (ec->tx_coalesce_usecs == 0) {
- if (ec->use_adaptive_tx_coalesce)
- netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
- (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
+ } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -778,9 +704,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
*/
if (queue < 0) {
for (i = 0; i < adapter->num_active_queues; i++)
- iavf_set_itr_per_queue(adapter, ec, i);
+ if (iavf_set_itr_per_queue(adapter, ec, i))
+ return -EINVAL;
} else if (queue < adapter->num_active_queues) {
- iavf_set_itr_per_queue(adapter, ec, queue);
+ if (iavf_set_itr_per_queue(adapter, ec, queue))
+ return -EINVAL;
} else {
netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
adapter->num_active_queues - 1);
@@ -794,11 +722,15 @@ static int __iavf_set_coalesce(struct net_device *netdev,
* iavf_set_coalesce - Set interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
*
* Change current coalescing settings for every queue.
**/
static int iavf_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
return __iavf_set_coalesce(netdev, ec, -1);
}
@@ -818,6 +750,908 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
}
/**
+ * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
+ * flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
+{
+ switch (flow) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ return AH_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ return ESP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return AH_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return ESP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return ETHER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_SCTP;
+ case AH_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_AH;
+ case ESP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_ESP;
+ case IPV4_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_SCTP;
+ case AH_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_AH;
+ case ESP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_ESP;
+ case IPV6_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_OTHER;
+ case ETHER_FLOW:
+ return IAVF_FDIR_FLOW_NON_IP_L2;
+ default:
+ return IAVF_FDIR_FLOW_NONE;
+ }
+}
+
+/**
+ * iavf_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * If the mask is fully set return true. If it is not valid for field return
+ * false.
+ */
+static bool iavf_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * iavf_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter for userdef data storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_flex_word *flex;
+ int i, cnt = 0;
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
+#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
+#define IAVF_USERDEF_FLEX_OFFS_S 16
+#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
+#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
+ u32 value = be32_to_cpu(fsp->h_ext.data[i]);
+ u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
+
+ if (!value || !mask)
+ continue;
+
+ if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
+ return -EINVAL;
+
+ /* 504 is the maximum value for offsets, and offset is measured
+ * from the start of the MAC address.
+ */
+#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
+ flex = &fltr->flex_words[cnt++];
+ flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
+ flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
+ if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+ }
+
+ fltr->flex_cnt = cnt;
+
+ return 0;
+}
+
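Seen from the encoding side, the user-def layout decoded above packs a 16-bit
flex word into bits 15:0 and a byte offset (measured from the start of the MAC
address, at most 504) into bits 31:16. A minimal sketch of the inverse
operation, reusing the mask definitions shown above (the helper name is
hypothetical):

	/* Hypothetical encoder mirroring iavf_parse_rx_flow_user_data():
	 * builds the 32-bit value user space places in h_ext.data[].
	 */
	static u32 iavf_pack_flex_userdef(u16 word, u16 offset)
	{
		return FIELD_PREP(IAVF_USERDEF_FLEX_OFFS_M, offset) |
		       FIELD_PREP(IAVF_USERDEF_FLEX_WORD_M, word);
	}
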
+/**
+ * iavf_fill_rx_flow_ext_data - fill the additional data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter to get additional data
+ */
+static void
+iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
+ return;
+
+ fsp->flow_type |= FLOW_EXT;
+
+ memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
+}
+
+/**
+ * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
+ * @adapter: the VF adapter structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct iavf_fdir_fltr *rule = NULL;
+ int ret = 0;
+
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
+ if (!rule) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
+ break;
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
+ fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iavf_fill_rx_flow_ext_data(fsp, rule);
+
+ if (rule->action == VIRTCHNL_ACTION_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @adapter: the VF adapter structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct iavf_fdir_fltr *fltr;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = fltr->loc;
+ cnt++;
+ }
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+
+ return val;
+}
+
+/**
+ * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: filter structure
+ */
+static int
+iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ u32 flow_type, q_index = 0;
+ enum virtchnl_action act;
+ int err;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ act = VIRTCHNL_ACTION_DROP;
+ } else {
+ q_index = fsp->ring_cookie;
+ if (q_index >= adapter->num_active_queues)
+ return -EINVAL;
+
+ act = VIRTCHNL_ACTION_QUEUE;
+ }
+
+ fltr->action = act;
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(fltr->ext_data.usr_def));
+ memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(fltr->ext_mask.usr_def));
+ }
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+ fltr->ip_ver = 4;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
+ fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
+ fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+ fltr->ip_ver = 4;
+ break;
+ case IPV4_USER_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
+ fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+ fltr->ip_ver = 4;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+ fltr->ip_ver = 6;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
+ fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+ fltr->ip_ver = 6;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
+ fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ fltr->ip_ver = 6;
+ break;
+ case ETHER_FLOW:
+ fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+ fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ err = iavf_validate_fdir_fltr_masks(adapter, fltr);
+ if (err)
+ return err;
+
+ if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ return -EEXIST;
+
+ err = iavf_parse_rx_flow_user_data(fsp, fltr);
+ if (err)
+ return err;
+
+ return iavf_fill_fdir_add_msg(adapter, fltr);
+}
+
+/**
+ * iavf_add_fdir_ethtool - add Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to add Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct iavf_fdir_fltr *fltr;
+ int err;
+
+ netdev_assert_locked(adapter->netdev);
+
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
+ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
+ if (!err)
+ err = iavf_fdir_add_fltr(adapter, fltr);
+
+ if (err)
+ kfree(fltr);
+
+ return err;
+}
+
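For reference, this path is reached from user space through the
ETHTOOL_SRXCLSRLINS ioctl, which is what "ethtool -N" issues. A hedged
user-space sketch that would land in iavf_set_rxnfc() and then
iavf_add_fdir_ethtool(); the interface name and rule location are
placeholders:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <netinet/in.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Roughly: ethtool -N eth0 flow-type tcp4 dst-port 80 action -1 loc 1 */
	static int add_drop_rule(const char *ifname)
	{
		struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
		struct ifreq ifr = {};
		int fd, ret;

		nfc.fs.flow_type = TCP_V4_FLOW;
		nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
		nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* match dst port only */
		nfc.fs.ring_cookie = RX_CLS_FLOW_DISC;		/* drop, not a queue */
		nfc.fs.location = 1;				/* placeholder rule ID */

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;
		ret = ioctl(fd, SIOCETHTOOL, &ifr);
		close(fd);
		return ret;
	}
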
+/**
+ * iavf_del_fdir_ethtool - delete Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ return -EOPNOTSUPP;
+
+ return iavf_fdir_del_fltr(adapter, false, fsp->location);
+}
+
+static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
+{
+ u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ return hdrs;
+}
+
+static u64
+iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
+{
+ u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
+
+ if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
+ if (cmd->data & RXH_IP_SRC)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
+ if (cmd->data & RXH_IP_DST)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
+ if (cmd->data & RXH_IP_SRC)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
+ if (cmd->data & RXH_IP_DST)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case GTPC_V4_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+ if (cmd->data & RXH_GTP_TEID) {
+ switch (cmd->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return hfld;
+}
+
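Putting the two parsers above together, a worked example (assuming standard
ethtool rx-flow-hash semantics):

	/* "ethtool -N <ifname> rx-flow-hash tcp4 sdfn" arrives with
	 *   cmd->flow_type == TCP_V4_FLOW
	 *   cmd->data      == RXH_IP_SRC | RXH_IP_DST |
	 *                     RXH_L4_B_0_1 | RXH_L4_B_2_3
	 * and the parsers produce
	 *   hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4
	 *   hfld == IAVF_ADV_RSS_HASH_FLD_IPV4_SA | IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
	 *           IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
	 *           IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT
	 * which iavf_set_rxfh_fields() below translates into a virtchnl
	 * RSS configuration message.
	 */
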
+static int
+iavf_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adv_rss *rss_old, *rss_new;
+ bool rss_new_add = false;
+ bool symm = false;
+ u64 hash_flds;
+ int err = 0;
+ u32 hdrs;
+
+ netdev_assert_locked(adapter->netdev);
+
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
+
+ hdrs = iavf_adv_rss_parse_hdrs(cmd);
+ if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
+ return -EINVAL;
+
+ hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
+ if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
+ return -EINVAL;
+
+ rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
+ if (!rss_new)
+ return -ENOMEM;
+
+ if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
+ symm)) {
+ kfree(rss_new);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
+ if (rss_old) {
+ if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
+ err = -EBUSY;
+ } else if (rss_old->hash_flds != hash_flds ||
+ rss_old->symm != symm) {
+ rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
+ rss_old->hash_flds = hash_flds;
+ rss_old->symm = symm;
+ memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
+ sizeof(rss_new->cfg_msg));
+ } else {
+ err = -EEXIST;
+ }
+ } else {
+ rss_new_add = true;
+ rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
+ rss_new->packet_hdrs = hdrs;
+ rss_new->hash_flds = hash_flds;
+ rss_new->symm = symm;
+ list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (!err)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
+
+ if (!rss_new_add)
+ kfree(rss_new);
+
+ return err;
+}
+
+static int
+iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adv_rss *rss;
+ u64 hash_flds;
+ u32 hdrs;
+
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ cmd->data = 0;
+
+ hdrs = iavf_adv_rss_parse_hdrs(cmd);
+ if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
+ return -EINVAL;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
+ if (rss)
+ hash_flds = rss->hash_flds;
+ else
+ hash_flds = IAVF_ADV_RSS_HASH_INVALID;
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
+ return -EINVAL;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
+ cmd->data |= (u64)RXH_IP_SRC;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
+ cmd->data |= (u64)RXH_IP_DST;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
+ cmd->data |= (u64)RXH_L4_B_0_1;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
+ cmd->data |= (u64)RXH_L4_B_2_3;
+
+ return 0;
+}
+
+/**
+ * iavf_set_rxnfc - command to set Rx flow rules.
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = iavf_add_fdir_ethtool(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = iavf_del_fdir_ethtool(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iavf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 iavf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_active_queues;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -832,13 +1666,20 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_active_queues;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
+ break;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ cmd->rule_cnt = adapter->fdir_active_fltr;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- netdev_info(netdev,
- "RSS hash info is not available to vf, use pf.\n");
+ case ETHTOOL_GRXCLSRULE:
+ ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
default:
break;
@@ -860,7 +1701,7 @@ static void iavf_get_channels(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = IAVF_MAX_REQ_QUEUES;
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
@@ -881,14 +1722,8 @@ static int iavf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- int num_req = ch->combined_count;
-
- if (num_req != adapter->num_active_queues &&
- !(adapter->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
- dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
- return -EINVAL;
- }
+ u32 num_req = ch->combined_count;
+ int ret = 0;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -899,14 +1734,24 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
- if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+ if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
+ if (num_req == adapter->num_active_queues)
+ return 0;
+
if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
return -EINVAL;
adapter->num_req_queues = num_req;
- return iavf_request_queues(adapter, num_req);
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+
+ ret = iavf_wait_for_reset(adapter);
+ if (ret)
+ netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");
+
+ return ret;
}
/**
@@ -938,28 +1783,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
/**
* iavf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
-static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int iavf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
- return 0;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
- memcpy(key, adapter->rss_key, adapter->rss_key_size);
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
+ if (rxfh->indir)
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ rxfh->indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
@@ -967,37 +1811,55 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/**
* iavf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int iavf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Only support toeplitz hash function */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+
+ if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ }
+
+ if (!rxfh->key && !rxfh->indir)
return 0;
- if (key)
- memcpy(adapter->rss_key, key, adapter->rss_key_size);
+ if (rxfh->key)
+ memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ if (rxfh->indir) {
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
+ }
return iavf_config_rss(adapter);
}
static const struct ethtool_ops iavf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
@@ -1005,18 +1867,20 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_strings = iavf_get_strings,
.get_ethtool_stats = iavf_get_ethtool_stats,
.get_sset_count = iavf_get_sset_count,
- .get_priv_flags = iavf_get_priv_flags,
- .set_priv_flags = iavf_set_priv_flags,
.get_msglevel = iavf_get_msglevel,
.set_msglevel = iavf_set_msglevel,
.get_coalesce = iavf_get_coalesce,
.set_coalesce = iavf_set_coalesce,
.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
+ .set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
+ .get_rx_ring_count = iavf_get_rx_ring_count,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
+ .get_rxfh_fields = iavf_get_rxfh_fields,
+ .set_rxfh_fields = iavf_set_rxfh_fields,
.get_channels = iavf_get_channels,
.set_channels = iavf_set_channels,
.get_rxfh_key_size = iavf_get_rxfh_key_size,