author     Tariq Toukan <tariqt@mellanox.com>        2016-11-02 17:12:24 +0200
committer  David S. Miller <davem@davemloft.net>     2016-11-02 15:07:11 -0400
commit     67f8b1dcb9ee7f1e165da4eb2ec53483a6b141ea (patch)
tree       bb2be58dee54bf94595e5f177ebe10de4dbf9e4f /drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
parent     ccc109b8ed24c6a84cc46a78ec3df9a57a6c674a (diff)
net/mlx4_en: Refactor the XDP forwarding rings scheme
Separately manage the two types of TX rings: regular ones and XDP ones.
Upon an XDP set, do not borrow regular TX rings and convert them into XDP
rings; allocate new ones instead, unless we hit the maximum number of rings.
This means that on systems with fewer cores we no longer consume the existing
TX rings for XDP, as long as we stay within the total TX ring limit.

XDP TX ring counters are not shown in ethtool statistics. Instead, XDP
counters will be added to the respective RX rings in a downstream patch.

This has no performance implications.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_ethtool.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  |  76
1 file changed, 45 insertions(+), 31 deletions(-)
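
Note: this diff indexes the TX rings and completion queues by a new ring-type
dimension (TX vs. TX_XDP). The actual definitions live in the companion
mlx4_en.h change, which is not part of this file's diff; the sketch below only
illustrates the layout the loops assume, with a simplified struct name and
placeholder array sizes.

	/* Illustrative sketch (not the actual header): per-type TX bookkeeping. */
	enum {
		TX,                   /* regular TX rings, visible to the stack  */
		TX_XDP,               /* rings dedicated to XDP_TX forwarding    */
		MLX4_EN_NUM_TX_TYPES, /* loop bound used throughout this patch   */
	};

	struct mlx4_en_priv_sketch {
		int tx_ring_num[MLX4_EN_NUM_TX_TYPES];                       /* rings per type  */
		struct mlx4_en_tx_ring *tx_ring[MLX4_EN_NUM_TX_TYPES][128];  /* placeholder size */
		struct mlx4_en_cq *tx_cq[MLX4_EN_NUM_TX_TYPES][128];         /* placeholder size */
	};

With this layout, ethtool paths that only care about stack-visible rings
(statistics, ring sizes, channel counts) read priv->tx_ring_num[TX], while
paths that must cover every ring (interrupt moderation, BlueFlame flags) loop
t over MLX4_EN_NUM_TX_TYPES.
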
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bdda17d2ea0f..e8ccb95680bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -49,16 +49,19 @@
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
- int i;
+ int i, t;
int err = 0;
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i]->moder_cnt = priv->tx_frames;
- priv->tx_cq[i]->moder_time = priv->tx_usecs;
- if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
- if (err)
- return err;
+ for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
+ priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv,
+ priv->tx_cq[t][i]);
+ if (err)
+ return err;
+ }
}
}
@@ -336,7 +339,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
- (priv->tx_ring_num * 2) +
+ (priv->tx_ring_num[TX] * 2) +
(priv->rx_ring_num * 3);
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
@@ -397,9 +400,9 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->pkstats)[i];
- for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i]->packets;
- data[index++] = priv->tx_ring[i]->bytes;
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ data[index++] = priv->tx_ring[TX][i]->packets;
+ data[index++] = priv->tx_ring[TX][i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
@@ -467,7 +470,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
- for (i = 0; i < priv->tx_ring_num; i++) {
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
@@ -1060,7 +1063,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
- tx_size == priv->tx_ring[0]->size)
+ tx_size == priv->tx_ring[TX][0]->size)
return 0;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1105,7 +1108,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
- param->tx_pending = priv->tx_ring[0]->size;
+ param->tx_pending = priv->tx_ring[TX][0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
@@ -1710,7 +1713,7 @@ static void mlx4_en_get_channels(struct net_device *dev,
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
channel->rx_count = priv->rx_ring_num;
- channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+ channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
}
static int mlx4_en_set_channels(struct net_device *dev,
@@ -1721,6 +1724,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
+ int xdp_count;
int err = 0;
if (channel->other_count || channel->combined_count ||
@@ -1729,20 +1733,25 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
- if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
- en_err(priv, "Minimum %d tx channels required with XDP on\n",
- priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
- return -EINVAL;
- }
-
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
+ xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
+ if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+ err = -EINVAL;
+ en_err(priv,
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+ channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+ MAX_TX_RINGS);
+ goto out;
+ }
+
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
- new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
@@ -1756,14 +1765,13 @@ static int mlx4_en_set_channels(struct net_device *dev,
mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
- priv->xdp_ring_num);
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
if (dev->num_tc)
mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
- en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+ en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
if (port_up) {
@@ -1774,8 +1782,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
err = mlx4_en_moderation_update(priv);
out:
- kfree(tmp);
mutex_unlock(&mdev->state_lock);
+ kfree(tmp);
return err;
}
@@ -1823,11 +1831,15 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
int ret = 0;
if (bf_enabled_new != bf_enabled_old) {
+ int t;
+
if (bf_enabled_new) {
bool bf_supported = true;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ bf_supported &=
+ priv->tx_ring[t][i]->bf_alloced;
if (!bf_supported) {
en_err(priv, "BlueFlame is not supported\n");
@@ -1839,8 +1851,10 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ priv->tx_ring[t][i]->bf_enabled =
+ bf_enabled_new;
en_info(priv, "BlueFlame %s\n",
bf_enabled_new ? "Enabled" : "Disabled");
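
For reference, the ring-budget rule that mlx4_en_set_channels() now enforces
can be read in isolation as the sketch below. The real MLX4_EN_NUM_UP and
MAX_TX_RINGS values come from mlx4_en.h and are not shown in this diff, so the
constants here are placeholders, not the driver's numbers.

	#include <stdbool.h>

	#define NUM_UP_SKETCH       8    /* assumed TX rings per channel (per UP) */
	#define MAX_TX_RINGS_SKETCH 128  /* assumed total TX ring budget          */

	/* When XDP is active, every RX ring needs a dedicated XDP TX ring, so the
	 * request is rejected if regular plus XDP rings exceed the budget. */
	static bool tx_ring_budget_ok(int tx_count, int rx_count, bool xdp_active)
	{
		int xdp_count = xdp_active ? rx_count : 0;

		return tx_count * NUM_UP_SKETCH + xdp_count <= MAX_TX_RINGS_SKETCH;
	}

In mlx4_en_set_channels() above, a failed check returns -EINVAL before any
resources are reallocated, and the XDP ring count simply follows the requested
RX ring count whenever XDP is already enabled.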