Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c        |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c      |  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 103
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c    |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  | 404
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c    |  83
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c      |  69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c      |  65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h    |  68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h |  10
10 files changed, 514 insertions(+), 326 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e36bebcab3f2..a49072b4fa52 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2679,15 +2679,13 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
if (!mailbox)
return ERR_PTR(-ENOMEM);
- mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
- &mailbox->dma);
+ mailbox->buf = pci_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
+ &mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
- memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
-
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
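The cmd.c hunk is a pure consolidation: pci_pool_zalloc() returns memory that is already zeroed, so the separate memset() of MLX4_MAILBOX_SIZE bytes becomes redundant. A minimal sketch of the equivalence (here pool abbreviates mlx4_priv(dev)->cmd.pool from the hunk):

	/* Before: allocate, then clear the mailbox by hand. */
	mailbox->buf = pci_pool_alloc(pool, GFP_KERNEL, &mailbox->dma);
	if (mailbox->buf)
		memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	/* After: allocation and zeroing in a single call. */
	mailbox->buf = pci_pool_zalloc(pool, GFP_KERNEL, &mailbox->dma);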
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index e3be7e44ff51..09dd3776db76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -65,7 +65,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->ring = ring;
- cq->is_tx = mode;
+ cq->type = mode;
cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
@@ -104,7 +104,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
- if (cq->is_tx == RX) {
+ if (cq->type == RX) {
if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
cq->vector)) {
cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
@@ -127,25 +127,17 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
- int xdp_index;
-
- /* The xdp tx irq must align with the rx ring that forwards to
- * it, so reindex these from 0. This should only happen when
- * tx_ring_num is not a multiple of rx_ring_num.
- */
- xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
- if (xdp_index >= 0)
- cq_idx = xdp_index;
+
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
- if (!cq->is_tx)
+ if (cq->type == RX)
cq->size = priv->rx_ring[cq->ring]->actual_size;
- if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
- (!cq->is_tx && priv->hwtstamp_config.rx_filter))
+ if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
+ (cq->type == RX && priv->hwtstamp_config.rx_filter))
timestamp_en = 1;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
@@ -154,10 +146,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
- cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+ cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
- if (cq->is_tx)
+ if (cq->type != RX)
netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
else
@@ -181,7 +173,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
- cq->is_tx == RX)
+ cq->type == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
cq->vector = 0;
cq->buf_size = 0;
@@ -193,10 +185,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
napi_disable(&cq->napi);
- if (!cq->is_tx) {
- napi_hash_del(&cq->napi);
- synchronize_rcu();
- }
netif_napi_del(&cq->napi);
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
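The en_cq.c changes retire the boolean cq->is_tx, which was already being compared against RX/TX enum values, in favor of a tri-state cq->type so XDP TX completion queues can be told apart from regular TX ones. A minimal sketch of the resulting dispatch, using the enum added in mlx4_en.h further down (dispatch_cq() is a hypothetical name, not part of the patch):

	enum cq_type { TX, TX_XDP, RX };	/* TX types first, see mlx4_en.h */

	static void dispatch_cq(struct mlx4_en_cq *cq)
	{
		/* TX and TX_XDP share the TX completion handler ... */
		if (cq->type != RX)
			cq->mcq.comp = mlx4_en_tx_irq;
		else	/* ... while RX keeps its own handler and EQ vector */
			cq->mcq.comp = mlx4_en_rx_irq;
	}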
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bdda17d2ea0f..d9c9f86a30df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -49,16 +49,19 @@
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
- int i;
+ int i, t;
int err = 0;
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_cq[i]->moder_cnt = priv->tx_frames;
- priv->tx_cq[i]->moder_time = priv->tx_usecs;
- if (priv->port_up) {
- err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
- if (err)
- return err;
+ for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
+ priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv,
+ priv->tx_cq[t][i]);
+ if (err)
+ return err;
+ }
}
}
@@ -192,6 +195,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"tx_prio_7_packets", "tx_prio_7_bytes",
"tx_novlan_packets", "tx_novlan_bytes",
+ /* xdp statistics */
+ "rx_xdp_drop",
+ "rx_xdp_tx",
+ "rx_xdp_tx_full",
};
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
@@ -336,8 +343,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
- (priv->tx_ring_num * 2) +
- (priv->rx_ring_num * 3);
+ (priv->tx_ring_num[TX] * 2) +
+ (priv->rx_ring_num * (3 + NUM_XDP_STATS));
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -360,6 +367,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
+
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&dev->stats)[i];
@@ -397,14 +406,21 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->pkstats)[i];
- for (i = 0; i < priv->tx_ring_num; i++) {
- data[index++] = priv->tx_ring[i]->packets;
- data[index++] = priv->tx_ring[i]->bytes;
+ for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ data[index++] = priv->tx_ring[TX][i]->packets;
+ data[index++] = priv->tx_ring[TX][i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
+ data[index++] = priv->rx_ring[i]->xdp_drop;
+ data[index++] = priv->rx_ring[i]->xdp_tx;
+ data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
spin_unlock_bh(&priv->stats_lock);
@@ -467,7 +483,13 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
- for (i = 0; i < priv->tx_ring_num; i++) {
+ for (i = 0; i < NUM_XDP_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
@@ -480,6 +502,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_bytes", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_dropped", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_drop", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_tx", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
@@ -1060,7 +1088,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
- tx_size == priv->tx_ring[0]->size)
+ tx_size == priv->tx_ring[TX][0]->size)
return 0;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1105,7 +1133,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
- param->tx_pending = priv->tx_ring[0]->size;
+ param->tx_pending = priv->tx_ring[TX][0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
@@ -1710,7 +1738,7 @@ static void mlx4_en_get_channels(struct net_device *dev,
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
channel->rx_count = priv->rx_ring_num;
- channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+ channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
}
static int mlx4_en_set_channels(struct net_device *dev,
@@ -1721,6 +1749,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
+ int xdp_count;
int err = 0;
if (channel->other_count || channel->combined_count ||
@@ -1729,20 +1758,25 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
- if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
- en_err(priv, "Minimum %d tx channels required with XDP on\n",
- priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
- return -EINVAL;
- }
-
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
+ xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
+ if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+ err = -EINVAL;
+ en_err(priv,
+ "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+ channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+ MAX_TX_RINGS);
+ goto out;
+ }
+
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
- new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
@@ -1756,14 +1790,13 @@ static int mlx4_en_set_channels(struct net_device *dev,
mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
- priv->xdp_ring_num);
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
if (dev->num_tc)
mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
- en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+ en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
if (port_up) {
@@ -1774,8 +1807,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
err = mlx4_en_moderation_update(priv);
out:
- kfree(tmp);
mutex_unlock(&mdev->state_lock);
+ kfree(tmp);
return err;
}
@@ -1823,11 +1856,15 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
int ret = 0;
if (bf_enabled_new != bf_enabled_old) {
+ int t;
+
if (bf_enabled_new) {
bool bf_supported = true;
- for (i = 0; i < priv->tx_ring_num; i++)
- bf_supported &= priv->tx_ring[i]->bf_alloced;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ bf_supported &=
+ priv->tx_ring[t][i]->bf_alloced;
if (!bf_supported) {
en_err(priv, "BlueFlame is not supported\n");
@@ -1839,8 +1876,10 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
- for (i = 0; i < priv->tx_ring_num; i++)
- priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ priv->tx_ring[t][i]->bf_enabled =
+ bf_enabled_new;
en_info(priv, "BlueFlame %s\n",
bf_enabled_new ? "Enabled" : "Disabled");
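With two counters per TX ring, three generic plus NUM_XDP_STATS counters per RX ring, and the bitmap-selected globals, the string table and the value array must grow in lockstep or every later ethtool -S entry shifts. A sketch of the arithmetic mlx4_en_get_sset_count() now performs (en_stats_count() is a hypothetical helper mirroring the hunk above):

	static int en_stats_count(const struct mlx4_en_priv *priv, int bitmap_count)
	{
		return bitmap_count +				/* bitmap-selected globals */
		       priv->tx_ring_num[TX] * 2 +		/* txN_packets, txN_bytes */
		       priv->rx_ring_num * (3 + NUM_XDP_STATS);	/* rxN_packets, rxN_bytes,
								 * rxN_dropped + 3 XDP counters */
	}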
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index bf7628db098a..36a7a54bbb82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -169,7 +169,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
- params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
+ params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
MLX4_EN_NUM_UP;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fb8bb027b69c..49a81f1fc1d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1217,8 +1217,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
struct mlx4_en_cq *cq;
int i;
- for (i = 0; i < priv->tx_ring_num; i++) {
- cq = priv->tx_cq[i];
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ cq = priv->tx_cq[TX][i];
napi_schedule(&cq->napi);
}
}
@@ -1302,12 +1302,14 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
- for (i = 0; i < priv->tx_ring_num; i++) {
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
+
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
continue;
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
- i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
- priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
+ i, tx_ring->qpn, tx_ring->sp_cqn,
+ tx_ring->cons, tx_ring->prod);
}
priv->port_stats.tx_timeout++;
@@ -1322,6 +1324,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
@@ -1331,7 +1334,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
struct mlx4_en_cq *cq;
- int i;
+ int i, t;
/* If we haven't received a specific coalescing setting
* (module param), we set the moderation parameters as follows:
@@ -1356,10 +1359,12 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->last_moder_bytes[i] = 0;
}
- for (i = 0; i < priv->tx_ring_num; i++) {
- cq = priv->tx_cq[i];
- cq->moder_cnt = priv->tx_frames;
- cq->moder_time = priv->tx_usecs;
+ for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ cq = priv->tx_cq[t][i];
+ cq->moder_cnt = priv->tx_frames;
+ cq->moder_time = priv->tx_usecs;
+ }
}
/* Reset auto-moderation params */
@@ -1390,10 +1395,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
return;
for (ring = 0; ring < priv->rx_ring_num; ring++) {
- spin_lock_bh(&priv->stats_lock);
- rx_packets = priv->rx_ring[ring]->packets;
- rx_bytes = priv->rx_ring[ring]->bytes;
- spin_unlock_bh(&priv->stats_lock);
+ rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
+ rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
rx_pkt_diff = ((unsigned long) (rx_packets -
priv->last_moder_packets[ring]));
@@ -1529,19 +1532,13 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
int tx_ring_idx)
{
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
- int rr_index;
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
+ int rr_index = tx_ring_idx;
- rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
- if (rr_index >= 0) {
- tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
- tx_ring->recycle_ring = priv->rx_ring[rr_index];
- en_dbg(DRV, priv,
- "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
- tx_ring_idx, rr_index);
- } else {
- tx_ring->recycle_ring = NULL;
- }
+ tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+ tx_ring->recycle_ring = priv->rx_ring[rr_index];
+ en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+ TX_XDP, tx_ring_idx, rr_index);
}
int mlx4_en_start_port(struct net_device *dev)
@@ -1551,9 +1548,8 @@ int mlx4_en_start_port(struct net_device *dev)
struct mlx4_en_cq *cq;
struct mlx4_en_tx_ring *tx_ring;
int rx_index = 0;
- int tx_index = 0;
int err = 0;
- int i;
+ int i, t;
int j;
u8 mc_list[16] = {0};
@@ -1638,43 +1634,51 @@ int mlx4_en_start_port(struct net_device *dev)
goto rss_err;
/* Configure tx cq's and rings */
- for (i = 0; i < priv->tx_ring_num; i++) {
- /* Configure cq */
- cq = priv->tx_cq[i];
- err = mlx4_en_activate_cq(priv, cq, i);
- if (err) {
- en_err(priv, "Failed allocating Tx CQ\n");
- goto tx_err;
- }
- err = mlx4_en_set_cq_moder(priv, cq);
- if (err) {
- en_err(priv, "Failed setting cq moderation parameters\n");
- mlx4_en_deactivate_cq(priv, cq);
- goto tx_err;
- }
- en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
- cq->buf->wqe_index = cpu_to_be16(0xffff);
-
- /* Configure ring */
- tx_ring = priv->tx_ring[i];
- err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
- i / priv->num_tx_rings_p_up);
- if (err) {
- en_err(priv, "Failed allocating Tx ring\n");
- mlx4_en_deactivate_cq(priv, cq);
- goto tx_err;
- }
- tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
- mlx4_en_init_recycle_ring(priv, i);
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ /* Configure cq */
+ cq = priv->tx_cq[t][i];
+ err = mlx4_en_activate_cq(priv, cq, i);
+ if (err) {
+ en_err(priv, "Failed allocating Tx CQ\n");
+ goto tx_err;
+ }
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ en_err(priv, "Failed setting cq moderation parameters\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ en_dbg(DRV, priv,
+ "Resetting index of collapsed CQ:%d to -1\n", i);
+ cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+ /* Configure ring */
+ tx_ring = priv->tx_ring[t][i];
+ err = mlx4_en_activate_tx_ring(priv, tx_ring,
+ cq->mcq.cqn,
+ i / num_tx_rings_p_up);
+ if (err) {
+ en_err(priv, "Failed allocating Tx ring\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ if (t != TX_XDP) {
+ tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ tx_ring->recycle_ring = NULL;
+ } else {
+ mlx4_en_init_recycle_ring(priv, i);
+ }
- /* Arm CQ for TX completions */
- mlx4_en_arm_cq(priv, cq);
+ /* Arm CQ for TX completions */
+ mlx4_en_arm_cq(priv, cq);
- /* Set initial ownership of all Tx TXBBs to SW (1) */
- for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
- *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
- ++tx_index;
+ /* Set initial ownership of all Tx TXBBs to SW (1) */
+ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+ *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
+ }
}
/* Configure port */
@@ -1749,9 +1753,18 @@ int mlx4_en_start_port(struct net_device *dev)
return 0;
tx_err:
- while (tx_index--) {
- mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
- mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
+ if (t == MLX4_EN_NUM_TX_TYPES) {
+ t--;
+ i = priv->tx_ring_num[t];
+ }
+ while (t >= 0) {
+ while (i--) {
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+ }
+ if (!t--)
+ break;
+ i = priv->tx_ring_num[t];
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
@@ -1776,7 +1789,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
struct ethtool_flow_id *flow, *tmp_flow;
- int i;
+ int i, t;
u8 mc_list[16] = {0};
if (!priv->port_up) {
@@ -1796,8 +1809,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
netif_tx_disable(dev);
+ spin_lock_bh(&priv->stats_lock);
+ mlx4_en_fold_software_stats(dev);
/* Set port as not active */
priv->port_up = false;
+ spin_unlock_bh(&priv->stats_lock);
+
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promsicuous mode */
@@ -1862,14 +1879,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
mlx4_en_destroy_drop_qp(priv);
/* Free TX Rings */
- for (i = 0; i < priv->tx_ring_num; i++) {
- mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
- mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+ mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+ }
}
msleep(10);
- for (i = 0; i < priv->tx_ring_num; i++)
- mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+ for (i = 0; i < priv->tx_ring_num[t]; i++)
+ mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
mlx4_en_delete_rss_steer_rules(priv);
@@ -1918,6 +1938,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring **tx_ring;
int i;
if (!mlx4_is_slave(mdev->dev))
@@ -1935,15 +1956,16 @@ static void mlx4_en_clear_stats(struct net_device *dev)
sizeof(priv->tx_priority_flowstats));
memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
- for (i = 0; i < priv->tx_ring_num; i++) {
- priv->tx_ring[i]->bytes = 0;
- priv->tx_ring[i]->packets = 0;
- priv->tx_ring[i]->tx_csum = 0;
- priv->tx_ring[i]->tx_dropped = 0;
- priv->tx_ring[i]->queue_stopped = 0;
- priv->tx_ring[i]->wake_queue = 0;
- priv->tx_ring[i]->tso_packets = 0;
- priv->tx_ring[i]->xmit_more = 0;
+ tx_ring = priv->tx_ring[TX];
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ tx_ring[i]->bytes = 0;
+ tx_ring[i]->packets = 0;
+ tx_ring[i]->tx_csum = 0;
+ tx_ring[i]->tx_dropped = 0;
+ tx_ring[i]->queue_stopped = 0;
+ tx_ring[i]->wake_queue = 0;
+ tx_ring[i]->tso_packets = 0;
+ tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -1999,17 +2021,20 @@ static int mlx4_en_close(struct net_device *dev)
static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
- int i;
+ int i, t;
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap = NULL;
#endif
- for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring && priv->tx_ring[i])
- mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq && priv->tx_cq[i])
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ if (priv->tx_ring[t] && priv->tx_ring[t][i])
+ mlx4_en_destroy_tx_ring(priv,
+ &priv->tx_ring[t][i]);
+ if (priv->tx_cq[t] && priv->tx_cq[t][i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+ }
}
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2025,20 +2050,22 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
- int i;
+ int i, t;
int node;
/* Create tx Rings */
- for (i = 0; i < priv->tx_ring_num; i++) {
- node = cpu_to_node(i % num_online_cpus());
- if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
- prof->tx_ring_size, i, TX, node))
- goto err;
-
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
- prof->tx_ring_size, TXBB_SIZE,
- node, i))
- goto err;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ node = cpu_to_node(i % num_online_cpus());
+ if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
+ prof->tx_ring_size, i, t, node))
+ goto err;
+
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
+ prof->tx_ring_size,
+ TXBB_SIZE, node, i))
+ goto err;
+ }
}
/* Create rx Rings */
@@ -2070,11 +2097,14 @@ err:
if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
- for (i = 0; i < priv->tx_ring_num; i++) {
- if (priv->tx_ring[i])
- mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
- if (priv->tx_cq[i])
- mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ for (i = 0; i < priv->tx_ring_num[t]; i++) {
+ if (priv->tx_ring[t][i])
+ mlx4_en_destroy_tx_ring(priv,
+ &priv->tx_ring[t][i]);
+ if (priv->tx_cq[t][i])
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+ }
}
return -ENOMEM;
}
@@ -2084,10 +2114,11 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src,
struct mlx4_en_port_profile *prof)
{
+ int t;
+
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
- dst->tx_ring_num = prof->tx_ring_num;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
@@ -2097,33 +2128,50 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
- dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!dst->tx_ring)
- return -ENOMEM;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ dst->tx_ring_num[t] = prof->tx_ring_num[t];
+ if (!dst->tx_ring_num[t])
+ continue;
- dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!dst->tx_cq) {
- kfree(dst->tx_ring);
- return -ENOMEM;
+ dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!dst->tx_ring[t])
+ goto err_free_tx;
+
+ dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!dst->tx_cq[t]) {
+ kfree(dst->tx_ring[t]);
+ goto err_free_tx;
+ }
}
+
return 0;
+
+err_free_tx:
+ while (t--) {
+ kfree(dst->tx_ring[t]);
+ kfree(dst->tx_cq[t]);
+ }
+ return -ENOMEM;
}
static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src)
{
+ int t;
memcpy(dst->rx_ring, src->rx_ring,
sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
memcpy(dst->rx_cq, src->rx_cq,
sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
sizeof(dst->hwtstamp_config));
- dst->tx_ring_num = src->tx_ring_num;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ dst->tx_ring_num[t] = src->tx_ring_num[t];
+ dst->tx_ring[t] = src->tx_ring[t];
+ dst->tx_cq[t] = src->tx_cq[t];
+ }
dst->rx_ring_num = src->rx_ring_num;
- dst->tx_ring = src->tx_ring;
- dst->tx_cq = src->tx_cq;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}
@@ -2131,14 +2179,18 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
struct mlx4_en_port_profile *prof)
{
+ int t;
+
mlx4_en_copy_priv(tmp, priv, prof);
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
"%s: Resource allocation failed, using previous configuration\n",
__func__);
- kfree(tmp->tx_ring);
- kfree(tmp->tx_cq);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ kfree(tmp->tx_ring[t]);
+ kfree(tmp->tx_cq[t]);
+ }
return -ENOMEM;
}
return 0;
@@ -2155,6 +2207,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ int t;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2188,8 +2241,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mlx4_en_free_resources(priv);
mutex_unlock(&mdev->state_lock);
- kfree(priv->tx_ring);
- kfree(priv->tx_cq);
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ kfree(priv->tx_ring[t]);
+ kfree(priv->tx_cq[t]);
+ }
free_netdev(dev);
}
@@ -2203,11 +2258,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
dev->mtu, new_mtu);
- if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
- en_err(priv, "Bad MTU size:%d.\n", new_mtu);
- return -EPERM;
- }
- if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
+ if (priv->tx_ring_num[TX_XDP] && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
en_err(priv, "MTU size:%d requires frags but XDP running\n",
new_mtu);
return -EOPNOTSUPP;
@@ -2598,7 +2649,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
struct mlx4_update_qp_params params;
int err;
@@ -2626,18 +2677,21 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
struct bpf_prog *old_prog;
+ struct mlx4_en_priv *tmp;
+ int tx_changed = 0;
int xdp_ring_num;
int port_up = 0;
int err;
int i;
- xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
+ xdp_ring_num = prog ? priv->rx_ring_num : 0;
/* No need to reconfigure buffers when simply swapping the
* program for a new one.
*/
- if (priv->xdp_ring_num == xdp_ring_num) {
+ if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
if (prog) {
prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
if (IS_ERR(prog))
@@ -2661,28 +2715,44 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
return -EOPNOTSUPP;
}
- if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
- en_err(priv,
- "Minimum %d tx channels required to run XDP\n",
- (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
- return -EINVAL;
- }
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
if (prog) {
prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto out;
+ }
}
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
+
+ if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
+ tx_changed = 1;
+ new_prof.tx_ring_num[TX] =
+ MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+ en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
+ }
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err) {
+ if (prog)
+ bpf_prog_sub(prog, priv->rx_ring_num - 1);
+ goto unlock_out;
+ }
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- priv->xdp_ring_num = xdp_ring_num;
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
- priv->xdp_ring_num);
+ mlx4_en_safe_replace_resources(priv, tmp);
+ if (tx_changed)
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
@@ -2702,15 +2772,18 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
}
}
+unlock_out:
mutex_unlock(&mdev->state_lock);
- return 0;
+out:
+ kfree(tmp);
+ return err;
}
static bool mlx4_xdp_attached(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return !!priv->xdp_ring_num;
+ return !!priv->tx_ring_num[TX_XDP];
}
static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3047,6 +3120,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
if (!mlx4_is_slave(dev))
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
+ last_i += NUM_PKT_STATS;
+
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
+ last_i += NUM_XDP_STATS;
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -3054,7 +3131,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
{
struct net_device *dev;
struct mlx4_en_priv *priv;
- int i;
+ int i, t;
int err;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
@@ -3062,7 +3139,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (dev == NULL)
return -ENOMEM;
- netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+ netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
@@ -3099,21 +3176,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
- priv->tx_ring_num = prof->tx_ring_num;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
- priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!priv->tx_ring) {
- err = -ENOMEM;
- goto out;
- }
- priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
- GFP_KERNEL);
- if (!priv->tx_cq) {
- err = -ENOMEM;
- goto out;
+ for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+ priv->tx_ring_num[t] = prof->tx_ring_num[t];
+ if (!priv->tx_ring_num[t])
+ continue;
+
+ priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!priv->tx_ring[t]) {
+ err = -ENOMEM;
+ goto err_free_tx;
+ }
+ priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+ MAX_TX_RINGS, GFP_KERNEL);
+ if (!priv->tx_cq[t]) {
+ kfree(priv->tx_ring[t]);
+ err = -ENOMEM;
+ goto out;
+ }
}
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
@@ -3196,7 +3279,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
else
dev->netdev_ops = &mlx4_netdev_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
dev->ethtool_ops = &mlx4_en_ethtool_ops;
@@ -3286,13 +3369,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ /* MTU range: 46 - hw-specific max */
+ dev->min_mtu = MLX4_EN_MIN_MTU;
+ dev->max_mtu = priv->max_mtu;
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
- en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+ en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
@@ -3352,6 +3439,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
return 0;
+err_free_tx:
+ while (t--) {
+ kfree(priv->tx_ring[t]);
+ kfree(priv->tx_cq[t]);
+ }
out:
mlx4_en_destroy_netdev(dev);
return err;
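The netdev changes turn tx_ring/tx_cq into per-type arrays indexed as priv->tx_ring[t][i], so every allocation loop gains an outer loop over MLX4_EN_NUM_TX_TYPES and every error path must unwind two dimensions. The tx_err label in mlx4_en_start_port() is the subtle one; the same logic, annotated (t and i are assumed to hold the position at which activation failed):

	tx_err:
		/* If both loops completed, the failure came later: step back
		 * to the last valid type and tear everything down.
		 */
		if (t == MLX4_EN_NUM_TX_TYPES) {
			t--;
			i = priv->tx_ring_num[t];
		}
		while (t >= 0) {
			while (i--) {	/* undo rings [t][0..i-1], newest first */
				mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
				mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
			}
			if (!t--)	/* type 0 done: stop before t wraps */
				break;
			i = priv->tx_ring_num[t];
		}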
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 59473a0ebcdf..9166d90e7328 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -147,6 +147,39 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
return ret;
}
+void mlx4_en_fold_software_stats(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ unsigned long packets, bytes;
+ int i;
+
+ if (!priv->port_up || mlx4_is_master(mdev->dev))
+ return;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
+
+ packets += READ_ONCE(ring->packets);
+ bytes += READ_ONCE(ring->bytes);
+ }
+ dev->stats.rx_packets = packets;
+ dev->stats.rx_bytes = bytes;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
+
+ packets += READ_ONCE(ring->packets);
+ bytes += READ_ONCE(ring->bytes);
+ }
+ dev->stats.tx_packets = packets;
+ dev->stats.tx_bytes = bytes;
+}
+
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_counter tmp_counter_stats;
@@ -159,6 +192,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
+ unsigned long sw_tx_dropped = 0;
unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
@@ -174,40 +208,42 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
- stats->rx_packets = 0;
- stats->rx_bytes = 0;
+ mlx4_en_fold_software_stats(dev);
+
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
+ priv->xdp_stats.rx_xdp_drop = 0;
+ priv->xdp_stats.rx_xdp_tx = 0;
+ priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
- stats->rx_packets += priv->rx_ring[i]->packets;
- stats->rx_bytes += priv->rx_ring[i]->bytes;
- sw_rx_dropped += priv->rx_ring[i]->dropped;
- priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
- priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
- priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
+ const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
+
+ sw_rx_dropped += READ_ONCE(ring->dropped);
+ priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
+ priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
+ priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+ priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+ priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
+ priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
- stats->tx_packets = 0;
- stats->tx_bytes = 0;
- stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
priv->port_stats.tso_packets = 0;
priv->port_stats.xmit_more = 0;
- for (i = 0; i < priv->tx_ring_num; i++) {
- const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];
-
- stats->tx_packets += ring->packets;
- stats->tx_bytes += ring->bytes;
- stats->tx_dropped += ring->tx_dropped;
- priv->port_stats.tx_chksum_offload += ring->tx_csum;
- priv->port_stats.queue_stopped += ring->queue_stopped;
- priv->port_stats.wake_queue += ring->wake_queue;
- priv->port_stats.tso_packets += ring->tso_packets;
- priv->port_stats.xmit_more += ring->xmit_more;
+ for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+ const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
+
+ sw_tx_dropped += READ_ONCE(ring->tx_dropped);
+ priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum);
+ priv->port_stats.queue_stopped += READ_ONCE(ring->queue_stopped);
+ priv->port_stats.wake_queue += READ_ONCE(ring->wake_queue);
+ priv->port_stats.tso_packets += READ_ONCE(ring->tso_packets);
+ priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
}
+
if (mlx4_is_master(mdev->dev)) {
stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
&mlx4_en_stats->RTOT_prio_1,
@@ -245,7 +281,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) +
+ sw_tx_dropped;
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
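mlx4_en_fold_software_stats() replaces the per-query summation that used to live inside mlx4_en_DUMP_ETH_STATS(): it folds the per-ring software counters into dev->stats under priv->stats_lock, while the datapath keeps updating the rings locklessly (masters skip it and use hardware counters instead). READ_ONCE() keeps the compiler from fusing or re-reading each counter load, which is sufficient for word-sized statistics where slightly stale values are acceptable. A reduced sketch of the idiom:

	unsigned long packets = 0, bytes = 0;
	int i;

	/* Writers (NAPI context) bump ring->packets/bytes without a lock;
	 * readers take priv->stats_lock only to serialize against each
	 * other, not against the writers.
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		packets += READ_ONCE(priv->rx_ring[i]->packets);
		bytes   += READ_ONCE(priv->rx_ring[i]->bytes);
	}
	dev->stats.rx_packets = packets;
	dev->stats.rx_bytes   = bytes;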
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f2e8beddcf44..6562f78b07f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -688,18 +688,23 @@ out_loopback:
dev_kfree_skb_any(skb);
}
-static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
{
- int index = ring->prod & ring->size_mask;
+ u32 missing = ring->actual_size - (ring->prod - ring->cons);
- while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- if (mlx4_en_prepare_rx_desc(priv, ring, index,
+ /* Try to batch allocations, but not too much. */
+ if (missing < 8)
+ return false;
+ do {
+ if (mlx4_en_prepare_rx_desc(priv, ring,
+ ring->prod & ring->size_mask,
GFP_ATOMIC | __GFP_COLD))
break;
ring->prod++;
- index = ring->prod & ring->size_mask;
- }
+ } while (--missing);
+
+ return true;
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
@@ -788,7 +793,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct bpf_prog *xdp_prog;
int doorbell_pending;
struct sk_buff *skb;
- int tx_index;
int index;
int nr;
unsigned int length;
@@ -808,7 +812,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
doorbell_pending = 0;
- tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -877,8 +880,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
- ring->bytes += length;
- ring->packets++;
l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
@@ -904,22 +905,26 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (likely(!mlx4_en_xmit_frame(frags, dev,
- length, tx_index,
+ if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
+ length, cq->ring,
&doorbell_pending)))
goto consumed;
- goto xdp_drop; /* Drop on xmit failure */
+ goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
-xdp_drop:
+ ring->xdp_drop++;
+xdp_drop_no_cnt:
if (likely(mlx4_en_rx_recycle(ring, frags)))
goto consumed;
goto next;
}
}
+ ring->bytes += length;
+ ring->packets++;
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
@@ -1081,15 +1086,20 @@ consumed:
out:
rcu_read_unlock();
- if (doorbell_pending)
- mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
+ if (polled) {
+ if (doorbell_pending)
+ mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);
+
+ mlx4_cq_set_ci(&cq->mcq);
+ wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+ ring->cons = cq->mcq.cons_index;
+ }
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
- mlx4_cq_set_ci(&cq->mcq);
- wmb(); /* ensure HW sees CQ consumer before we post new buffers */
- ring->cons = cq->mcq.cons_index;
- mlx4_en_refill_rx_buffers(priv, ring);
- mlx4_en_update_rx_prod_db(ring);
+
+ if (mlx4_en_refill_rx_buffers(priv, ring))
+ mlx4_en_update_rx_prod_db(ring);
+
return polled;
}
@@ -1131,14 +1141,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
return budget;
/* Current cpu is not according to smp_irq_affinity -
- * probably affinity changed. need to stop this NAPI
- * poll, and restart it on the right CPU
+ * probably affinity changed. Need to stop this NAPI
+ * poll, and restart it on the right CPU.
+ * Try to avoid returning a too small value (like 0),
+ * to not fool net_rx_action() and its netdev_budget
*/
- done = 0;
+ if (done)
+ done--;
}
/* Done for now */
- napi_complete_done(napi, done);
- mlx4_en_arm_cq(priv, cq);
+ if (napi_complete_done(napi, done))
+ mlx4_en_arm_cq(priv, cq);
return done;
}
@@ -1162,7 +1175,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
/* bpf requires buffers to be set up as 1 packet per page.
* This only works when num_frags == 1.
*/
- if (priv->xdp_ring_num) {
+ if (priv->tx_ring_num[TX_XDP]) {
dma_dir = PCI_DMA_BIDIRECTIONAL;
/* This will gain efficient xdp frame recycling at the expense
* of more costly truesize accounting
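mlx4_en_refill_rx_buffers() now reports whether it posted anything and declines to run for fewer than eight missing descriptors, so the RX producer doorbell is batched instead of rung on every poll. The refill loop, annotated (same names as the hunk above):

	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	if (missing < 8)		/* too little to be worth a doorbell */
		return false;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_COLD))
			break;		/* page allocation failed: stop early */
		ring->prod++;
	} while (--missing);

	return true;			/* caller rings mlx4_en_update_rx_prod_db() */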
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e2509bba3e7c..4b597dca5c52 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -66,7 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
- ring->stride = stride;
+ ring->sp_stride = stride;
ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
@@ -90,22 +90,22 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
goto err_info;
}
}
- ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+ ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE);
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
- ring->buf = ring->wqres.buf.direct.buf;
+ ring->buf = ring->sp_wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
ring, ring->buf, ring->size, ring->buf_size,
- (unsigned long long) ring->wqres.buf.direct.map);
+ (unsigned long long) ring->sp_wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
MLX4_RESERVE_ETH_BF_QP);
@@ -114,12 +114,12 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
goto err_hwq_res;
}
- err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
- ring->qp.event = mlx4_en_sqp_event;
+ ring->sp_qp.event = mlx4_en_sqp_event;
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
@@ -141,7 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
if (queue_index < priv->num_tx_rings_p_up)
cpumask_set_cpu(cpumask_local_spread(queue_index,
priv->mdev->dev->numa_node),
- &ring->affinity_mask);
+ &ring->sp_affinity_mask);
*pring = ring;
return 0;
@@ -149,7 +149,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_hwq_res:
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
@@ -171,10 +171,10 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
if (ring->bf_alloced)
mlx4_bf_free(mdev->dev, &ring->bf);
- mlx4_qp_remove(mdev->dev, &ring->qp);
- mlx4_qp_free(mdev->dev, &ring->qp);
+ mlx4_qp_remove(mdev->dev, &ring->sp_qp);
+ mlx4_qp_free(mdev->dev, &ring->sp_qp);
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
kvfree(ring->tx_info);
@@ -190,7 +190,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- ring->cqn = cq;
+ ring->sp_cqn = cq;
ring->prod = 0;
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
@@ -198,21 +198,21 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
memset(ring->buf, 0, ring->buf_size);
ring->free_tx_desc = mlx4_en_free_tx_desc;
- ring->qp_state = MLX4_QP_STATE_RST;
- ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
+ ring->sp_qp_state = MLX4_QP_STATE_RST;
+ ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8);
ring->mr_key = cpu_to_be32(mdev->mr.key);
- mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
- ring->cqn, user_prio, &ring->context);
+ mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn,
+ ring->sp_cqn, user_prio, &ring->sp_context);
if (ring->bf_alloced)
- ring->context.usr_page =
+ ring->sp_context.usr_page =
cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
ring->bf.uar->index));
- err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
- &ring->qp, &ring->qp_state);
- if (!cpumask_empty(&ring->affinity_mask))
- netif_set_xps_queue(priv->dev, &ring->affinity_mask,
+ err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context,
+ &ring->sp_qp, &ring->sp_qp_state);
+ if (!cpumask_empty(&ring->sp_affinity_mask))
+ netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask,
ring->queue_index);
return err;
@@ -223,8 +223,8 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
{
struct mlx4_en_dev *mdev = priv->mdev;
- mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
- MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+ mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp);
}
static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
@@ -392,7 +392,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
cnt++;
}
- netdev_tx_reset_queue(ring->tx_queue);
+ if (ring->tx_queue)
+ netdev_tx_reset_queue(ring->tx_queue);
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
@@ -405,7 +406,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
- struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
+ struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
struct mlx4_cqe *cqe;
u16 index;
u16 new_index, ring_index, stamp_index;
@@ -807,7 +808,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
- ring = priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[TX][tx_ind];
if (!priv->port_up)
goto tx_drop;
@@ -1078,7 +1079,8 @@ tx_drop:
return NETDEV_TX_OK;
}
-netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+ struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
int tx_ind, int *doorbell_pending)
{
@@ -1101,7 +1103,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE,
"mlx4_en_xmit_frame requires minimum size tx desc");
- ring = priv->tx_ring[tx_ind];
+ ring = priv->tx_ring[TX_XDP][tx_ind];
if (!priv->port_up)
goto tx_drop;
@@ -1153,8 +1155,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- ring->packets++;
- ring->bytes += tx_info->nr_bytes;
+ rx_ring->xdp_tx++;
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
ring->prod += nr_txbb;
@@ -1178,7 +1179,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
return NETDEV_TX_OK;
tx_drop_count:
- ring->tx_dropped++;
+ rx_ring->xdp_tx_full++;
tx_drop:
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index a3528dd1e72e..20a936428f4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -207,8 +207,11 @@ enum {
*/
enum cq_type {
- RX = 0,
- TX = 1,
+ /* keep tx types first */
+ TX,
+ TX_XDP,
+#define MLX4_EN_NUM_TX_TYPES (TX_XDP + 1)
+ RX,
};
@@ -278,46 +281,50 @@ struct mlx4_en_tx_ring {
u32 last_nr_txbb;
u32 cons;
unsigned long wake_queue;
+ struct netdev_queue *tx_queue;
+ u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner,
+ u64 timestamp, int napi_mode);
+ struct mlx4_en_rx_ring *recycle_ring;
/* cache line used and dirtied in mlx4_en_xmit() */
u32 prod ____cacheline_aligned_in_smp;
+ unsigned int tx_dropped;
unsigned long bytes;
unsigned long packets;
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
- unsigned int tx_dropped;
struct mlx4_bf bf;
- unsigned long queue_stopped;
/* Following part should be mostly read */
- cpumask_t affinity_mask;
- struct mlx4_qp qp;
- struct mlx4_hwq_resources wqres;
+ __be32 doorbell_qpn;
+ __be32 mr_key;
u32 size; /* number of TXBBs */
u32 size_mask;
- u16 stride;
u32 full_size;
- u16 cqn; /* index of port CQ associated with this ring */
u32 buf_size;
- __be32 doorbell_qpn;
- __be32 mr_key;
void *buf;
struct mlx4_en_tx_info *tx_info;
- struct mlx4_en_rx_ring *recycle_ring;
- u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- int index, u8 owner,
- u64 timestamp, int napi_mode);
- u8 *bounce_buf;
- struct mlx4_qp_context context;
int qpn;
- enum mlx4_qp_state qp_state;
u8 queue_index;
bool bf_enabled;
bool bf_alloced;
- struct netdev_queue *tx_queue;
- int hwtstamp_tx_type;
+ u8 hwtstamp_tx_type;
+ u8 *bounce_buf;
+
+ /* Not used in fast path
+ * Only queue_stopped might be used if BQL is not properly working.
+ */
+ unsigned long queue_stopped;
+ struct mlx4_hwq_resources sp_wqres;
+ struct mlx4_qp sp_qp;
+ struct mlx4_qp_context sp_context;
+ cpumask_t sp_affinity_mask;
+ enum mlx4_qp_state sp_qp_state;
+ u16 sp_stride;
+ u16 sp_cqn; /* index of port CQ associated with this ring */
} ____cacheline_aligned_in_smp;
struct mlx4_en_rx_desc {
@@ -347,6 +354,9 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long xdp_drop;
+ unsigned long xdp_tx;
+ unsigned long xdp_tx_full;
unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
@@ -361,7 +371,7 @@ struct mlx4_en_cq {
int size;
int buf_size;
int vector;
- enum cq_type is_tx;
+ enum cq_type type;
u16 moder_time;
u16 moder_cnt;
struct mlx4_cqe *buf;
@@ -372,7 +382,7 @@ struct mlx4_en_cq {
struct mlx4_en_port_profile {
u32 flags;
- u32 tx_ring_num;
+ u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES];
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
@@ -569,17 +579,16 @@ struct mlx4_en_priv {
u32 flags;
u8 num_tx_rings_p_up;
u32 tx_work_limit;
- u32 tx_ring_num;
+ u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES];
u32 rx_ring_num;
u32 rx_skb_size;
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
u16 num_frags;
u16 log_rx_info;
- int xdp_ring_num;
- struct mlx4_en_tx_ring **tx_ring;
+ struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
- struct mlx4_en_cq **tx_cq;
+ struct mlx4_en_cq **tx_cq[MLX4_EN_NUM_TX_TYPES];
struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct rx_mode_task;
@@ -597,6 +606,7 @@ struct mlx4_en_priv {
struct mlx4_en_flow_stats_rx rx_flowstats;
struct mlx4_en_flow_stats_tx tx_flowstats;
struct mlx4_en_port_stats port_stats;
+ struct mlx4_en_xdp_stats xdp_stats;
struct mlx4_en_stats_bitmap stats_bitmap;
struct list_head mc_list;
struct list_head curr_list;
@@ -685,7 +695,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+ struct mlx4_en_rx_alloc *frame,
struct net_device *dev, unsigned int length,
int tx_ind, int *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
@@ -744,6 +755,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
+void mlx4_en_fold_software_stats(struct net_device *dev);
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
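The mlx4_en_tx_ring reshuffle in mlx4_en.h groups fields by access pattern: completion-path fields first, a ____cacheline_aligned_in_smp block dirtied by mlx4_en_xmit(), read-mostly descriptor state after that, and finally an sp_ ("slow path") block of setup-time state the fast path never touches. A minimal, hypothetical sketch of the technique (not the driver's actual struct):

	struct ring_layout_sketch {
		/* cache line owned by the completion (consumer) side */
		u32 cons;
		unsigned long packets_completed;

		/* cache line owned by the transmit (producer) side */
		u32 prod ____cacheline_aligned_in_smp;
		unsigned long packets_sent;

		/* sp_*: configured at ring setup, cold thereafter */
		enum mlx4_qp_state sp_qp_state;
		cpumask_t sp_affinity_mask;
	} ____cacheline_aligned_in_smp;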
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7fd466c0b929..48641cb0367f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -55,6 +55,13 @@ struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS 6
};
+struct mlx4_en_xdp_stats {
+ unsigned long rx_xdp_drop;
+ unsigned long rx_xdp_tx;
+ unsigned long rx_xdp_tx_full;
+#define NUM_XDP_STATS 3
+};
+
#define NUM_MAIN_STATS 21
#define MLX4_NUM_PRIORITIES 8
@@ -107,7 +114,8 @@ enum {
};
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
- NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
+ NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
+ NUM_XDP_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
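mlx4_en_xdp_stats follows the file's convention of defining the member count alongside the struct; NUM_XDP_STATS must equal the number of unsigned long members because en_ethtool.c walks the struct as a flat array, as in this excerpt from mlx4_en_get_ethtool_stats() above:

	/* Any mismatch between the struct layout and NUM_XDP_STATS
	 * silently shifts every counter exported after it.
	 */
	for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->xdp_stats)[i];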