Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_tx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  311
1 file changed, 189 insertions(+), 122 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 73faa3d77921..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -42,7 +42,9 @@
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/moduleparam.h>
+#include <linux/indirect_call_wrapper.h>
+#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -57,17 +59,14 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring) {
- en_err(priv, "Failed allocating TX ring\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
}
ring->size = size;
ring->size_mask = size - 1;
ring->sp_stride = stride;
- ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+ ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
@@ -79,9 +78,11 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
- ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
+ ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
+ GFP_KERNEL, node);
if (!ring->bounce_buf) {
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
+ GFP_KERNEL);
if (!ring->bounce_buf) {
err = -ENOMEM;
goto err_info;
@@ -105,7 +106,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
(unsigned long long) ring->sp_wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
- MLX4_RESERVE_ETH_BF_QP);
+ MLX4_RESERVE_ETH_BF_QP,
+ MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
goto err_hwq_res;
@@ -131,6 +133,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = !!(priv->pflags &
MLX4_EN_PRIV_FLAGS_BLUEFLAME);
}
+ ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
@@ -226,7 +229,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
{
- return ring->prod - ring->cons > ring->full_size;
+ u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);
+
+ return used > ring->full_size;
}
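
Both counters are updated locklessly from different contexts (prod by the xmit path, cons by the completion path), so the one-shot READ_ONCE() loads stop the compiler from tearing or re-fetching them, and the unsigned subtraction stays correct across index wraparound. A minimal sketch of the paired accesses (field names mirror the driver's; the surrounding code is elided, and the writer sides appear in later hunks of this diff):

	/* xmit path: publish the new producer index */
	WRITE_ONCE(ring->prod, ring->prod + nr_txbb);

	/* completion path: publish the new consumer index */
	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	/* any reader: one-shot loads, wrap-safe unsigned distance */
	u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);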
static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
@@ -263,6 +268,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
}
}
+INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u64 timestamp,
+ int napi_mode));
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -294,12 +303,12 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
dma_unmap_single(priv->ddev,
tx_info->map0_dma,
tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
dma_unmap_page(priv->ddev,
tx_info->map0_dma,
tx_info->map0_byte_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Optimize the common case when there are no wraparounds */
if (likely((void *)tx_desc +
(tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
@@ -308,7 +317,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
dma_unmap_page(priv->ddev,
(dma_addr_t)be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
} else {
if ((void *)data >= end)
@@ -322,7 +331,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
dma_unmap_page(priv->ddev,
(dma_addr_t)be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
}
}
@@ -331,22 +340,21 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
return tx_info->nr_txbb;
}
+INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u64 timestamp,
+ int napi_mode));
+
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
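
The recycle path now returns TX-completed pages straight to the RX ring's page_pool. The allow_direct argument (the !!napi_mode above) matters: direct recycling into the pool's per-CPU cache is only safe from the NAPI context that owns the pool, while the ndo_close() path (napi_mode == 0) must go through the pool's locked ptr_ring. The same decision written out long-hand, assuming a pool created by the RX side:

	struct page_pool *pool = ring->recycle_ring->pp;

	if (napi_mode)
		/* softirq on the pool's CPU: lockless direct recycle */
		page_pool_put_full_page(pool, tx_info->page, true);
	else
		/* teardown path: take the pool's ptr_ring instead */
		page_pool_put_full_page(pool, tx_info->page, false);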
@@ -384,8 +392,37 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq, int napi_budget)
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+ u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_tx_desc *tx_desc;
+ u16 wqe_index;
+ int desc_size;
+
+ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+ false);
+
+ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+ tx_info = &ring->tx_info[wqe_index];
+ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+ wqe_index, desc_size);
+ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+ return;
+
+ en_err(priv, "Scheduling port restart\n");
+ queue_work(mdev->workqueue, &priv->restart_task);
+}
+
+int mlx4_en_process_tx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -407,14 +444,16 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 ring_cons;
if (unlikely(!priv->port_up))
- return true;
+ return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
- last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
- ring_cons = ACCESS_ONCE(ring->cons);
+ last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+ ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
@@ -430,13 +469,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
- MLX4_CQE_OPCODE_ERROR)) {
- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
- cqe_err->vendor_err_syndrome,
- cqe_err->syndrome);
- }
+ MLX4_CQE_OPCODE_ERROR))
+ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+ ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
@@ -451,7 +487,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
- last_nr_txbb = ring->free_tx_desc(
+ last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
+ mlx4_en_free_tx_desc,
+ mlx4_en_recycle_tx_desc,
priv, ring, ring_index,
timestamp, napi_budget);
@@ -478,11 +516,11 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
wmb();
/* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+ WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
- return done < budget;
+ return done;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
@@ -494,7 +532,7 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
ring->wake_queue++;
}
- return done < budget;
+ return done;
}
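
INDIRECT_CALL_2() (from the newly included <linux/indirect_call_wrapper.h>) replaces the retpoline-penalized indirect call through ring->free_tx_desc with predictable compares against the only two possible callees. Roughly, the call in the hunk above expands to:

	last_nr_txbb = likely(ring->free_tx_desc == mlx4_en_free_tx_desc) ?
		mlx4_en_free_tx_desc(priv, ring, ring_index,
				     timestamp, napi_budget) :
		likely(ring->free_tx_desc == mlx4_en_recycle_tx_desc) ?
		mlx4_en_recycle_tx_desc(priv, ring, ring_index,
					timestamp, napi_budget) :
		ring->free_tx_desc(priv, ring, ring_index,
				   timestamp, napi_budget);

The INDIRECT_CALLABLE_DECLARE() annotations added earlier in this diff keep both symbols declared so the comparison can name them.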
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -514,14 +552,14 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
- bool clean_complete;
+ int work_done;
- clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
- if (!clean_complete)
+ work_done = mlx4_en_process_tx_cq(dev, cq, budget);
+ if (work_done >= budget)
return budget;
- napi_complete(napi);
- mlx4_en_arm_cq(priv, cq);
+ if (napi_complete_done(napi, work_done))
+ mlx4_en_arm_cq(priv, cq);
return 0;
}
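
The poll routine now reports actual work done instead of a boolean. Returning budget keeps the NAPI scheduled; otherwise napi_complete_done(napi, work_done) may return false (for instance when busy polling owns the context), in which case the CQ must not be re-armed. A skeleton of the pattern, with hypothetical clean_tx_cq()/arm_cq() helpers standing in for the mlx4 ones:

	static int poll(struct napi_struct *napi, int budget)
	{
		int work_done = clean_tx_cq(napi, budget);	/* hypothetical */

		if (work_done >= budget)
			return budget;		/* more work: stay scheduled */
		if (napi_complete_done(napi, work_done))
			arm_cq(napi);		/* hypothetical re-arm */
		return 0;
	}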
@@ -598,19 +636,28 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
- void **pfrag)
+ void **pfrag,
+ int *hopbyhop)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
- if (skb->encapsulation)
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
- else
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *hopbyhop = 0;
+ if (skb->encapsulation) {
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
+ } else {
+ /* Detects large IPV6 TCP packets and prepares for removal of
+ * HBH header that has been pushed by ip6_xmit(),
+ * mainly so that tcpdump can dissect them.
+ */
+ if (ipv6_has_hopopt_jumbo(skb))
+ *hopbyhop = sizeof(struct hop_jumbo_hdr);
+ *lso_header_size = skb_tcp_all_headers(skb);
+ }
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
- ALIGN(*lso_header_size + 4, DS_SIZE);
+ ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
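
ipv6_has_hopopt_jumbo() detects the BIG TCP case: for IPv6 GSO packets larger than 64K, ip6_xmit() pushes a Hop-by-Hop jumbogram option that this hardware must not see, so get_real_size() subtracts its length from the LSO header size. For reference, the 8-byte header being sized here, as declared in <net/ipv6.h>:

	struct hop_jumbo_hdr {
		u8	nexthdr;
		u8	hdrlen;
		u8	tlv_type;	/* IPV6_TLV_JUMBO, 0xC2 */
		u8	tlv_len;	/* 4 */
		__be32	jumbo_payload_len;
	};

The xmit path strips it while copying the LSO header, as shown in the -982 hunk below.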
@@ -643,7 +690,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
void *fragptr)
{
struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
- int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
+ int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl);
unsigned int hlen = skb_headlen(skb);
if (skb->len <= spc) {
@@ -651,32 +698,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
- memset(((void *)(inl + 1)) + skb->len, 0,
+ memset(inl->data + skb->len, 0,
MIN_PKT_LEN - skb->len);
}
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen, fragptr,
+ memcpy(inl->data + hlen, fragptr,
skb_frag_size(&shinfo->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (hlen <= spc) {
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (hlen < spc) {
- memcpy(((void *)(inl + 1)) + hlen,
+ memcpy(inl->data + hlen,
fragptr, spc - hlen);
fragptr += spc - hlen;
}
- inl = (void *) (inl + 1) + spc;
- memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ inl = (void *)inl->data + spc;
+ memcpy(inl->data, fragptr, skb->len - spc);
} else {
- skb_copy_from_linear_data(skb, inl + 1, spc);
- inl = (void *) (inl + 1) + spc;
- skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_copy_from_linear_data(skb, inl->data, spc);
+ inl = (void *)inl->data + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl->data,
hlen - spc);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen - spc,
+ memcpy(inl->data + hlen - spc,
fragptr,
skb_frag_size(&shinfo->frags[0]));
}
@@ -687,15 +734,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return skb_tx_hash(dev, skb);
+ return netdev_pick_tx(dev, skb, NULL);
- return fallback(dev, skb) % rings_p_up;
+ return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
@@ -717,8 +764,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ (__force u32)ring->doorbell_qpn, ring->doorbell_address);
}
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
@@ -774,9 +820,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
/* Map fragments if any */
for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
- const struct skb_frag_struct *frag;
-
- frag = &shinfo->frags[i_frag];
+ const skb_frag_t *frag = &shinfo->frags[i_frag];
byte_count = skb_frag_size(frag);
dma = skb_frag_dma_map(ddev, frag,
0, byte_count,
@@ -797,7 +841,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
dma = dma_map_single(ddev, skb->data +
lso_header_size, byte_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ddev, dma))
goto tx_drop_unmap;
@@ -819,7 +863,7 @@ tx_drop_unmap:
++data;
dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
return false;
@@ -834,11 +878,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
+ u32 __maybe_unused ring_cons;
int tx_ind;
int nr_txbb;
int desc_size;
int real_size;
u32 index, bf_index;
+ struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@@ -847,7 +893,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
- u32 ring_cons;
+ int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -856,22 +902,14 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!priv->port_up))
goto tx_drop;
- /* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = ACCESS_ONCE(ring->cons);
-
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
- &inline_ok, &fragptr);
+ &inline_ok, &fragptr, &hopbyhop);
if (unlikely(!real_size))
goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
nr_txbb = desc_size >> LOG_TXBB_SIZE;
- if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop_count;
- }
bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
@@ -890,10 +928,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
- /* Track current inflight packets for performance analysis */
- AVG_PERF_COUNTER(priv->pstats.inflight_avg,
- (u32)(ring->prod - ring_cons - 1));
-
/* Packet is good - grab an index and transmit it */
index = ring->prod & ring->size_mask;
bf_index = ring->prod;
@@ -903,6 +937,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(index + nr_txbb <= ring->size))
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
else {
+ if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
+ if (netif_msg_tx_err(priv))
+ en_warn(priv, "Oversized header or SG list\n");
+ goto tx_drop_count;
+ }
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
bf_ok = false;
@@ -917,7 +956,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
- int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+ int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -950,7 +989,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->ts_requested = 1;
}
- /* Prepare ctrl segement apart opcode+ownership, which depends on
+ /* Prepare ctrl segment apart opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
@@ -982,14 +1021,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
- /* Copy headers;
- * note that we already verified that it is linear */
- memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ if (unlikely(hopbyhop)) {
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
+ h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb));
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else {
+ /* Copy headers;
+ * note that we already verified that it is linear
+ */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ }
ring->tso_packets++;
i = shinfo->gso_segs;
@@ -1004,8 +1060,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
- netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
- AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
build_inline_wqe(tx_desc, skb, shinfo, fragptr);
@@ -1028,7 +1082,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
}
- ring->prod += nr_txbb;
+ WRITE_ONCE(ring->prod, ring->prod + nr_txbb);
/* If we used a bounce buffer then copy descriptor back into place */
if (unlikely(bounce))
@@ -1042,7 +1096,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
}
- send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+ send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+ tx_info->nr_bytes,
+ netdev_xmit_more());
real_size = (real_size / 16) & 0x3f;
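
__netdev_tx_sent_queue() replaces the netdev_tx_sent_queue() call dropped in the -1004 hunk above plus the skb->xmit_more test: it does the BQL accounting and returns true when the doorbell must ring now, either because the stack announced no further packets or because BQL just stopped the queue, touching the queue state only once. Usage shape (sketch):

	send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
					       tx_info->nr_bytes,
					       netdev_xmit_more());
	if (send_doorbell)
		mlx4_en_xmit_doorbell(ring);	/* else a later skb kicks HW */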
@@ -1065,7 +1122,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = ACCESS_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
@@ -1084,13 +1140,35 @@ tx_drop:
#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
/ 16) & 0x3f)
+void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[i];
+ struct mlx4_en_tx_desc *tx_desc = ring->buf +
+ (i << LOG_TXBB_SIZE);
+
+ tx_info->map0_byte_count = PAGE_SIZE;
+ tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
+ tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
+ tx_info->ts_requested = 0;
+ tx_info->nr_maps = 1;
+ tx_info->linear = 1;
+ tx_info->inl = 0;
+
+ tx_desc->data.lkey = ring->mr_key;
+ tx_desc->ctrl.qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
+ }
+}
+
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
- struct net_device *dev, unsigned int length,
+ struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- union mlx4_wqe_qpn_vlan qpn_vlan = {};
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_en_tx_info *tx_info;
struct mlx4_wqe_data_seg *data;
@@ -1110,51 +1188,40 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
index = ring->prod & ring->size_mask;
tx_info = &ring->tx_info[index];
- /* Track current inflight packets for performance analysis */
- AVG_PERF_COUNTER(priv->pstats.inflight_avg,
- (u32)(ring->prod - READ_ONCE(ring->cons) - 1));
-
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
tx_info->map0_dma = dma;
- tx_info->map0_byte_count = PAGE_SIZE;
- tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
- tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
- tx_info->ts_requested = 0;
- tx_info->nr_maps = 1;
- tx_info->linear = 1;
- tx_info->inl = 0;
dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
- length, PCI_DMA_TODEVICE);
+ length, DMA_TO_DEVICE);
data->addr = cpu_to_be64(dma + frame->page_offset);
- data->lkey = ring->mr_key;
dma_wmb();
data->byte_count = cpu_to_be32(length);
/* tx completion can avoid cache line miss for common cases */
- tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
rx_ring->xdp_tx++;
- AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
- ring->prod += MLX4_EN_XDP_TX_NRTXBB;
+ WRITE_ONCE(ring->prod, ring->prod + MLX4_EN_XDP_TX_NRTXBB);
- qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ ring->xmit_more++;
- mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, 0,
- op_own, false, false);
*doorbell_pending = true;
return NETDEV_TX_OK;
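
With the constant descriptor fields (lkey, fence_size, srcrb_flags and the per-slot tx_info defaults) pre-written once by mlx4_en_init_tx_xdp_ring_descs(), the XDP_TX hot path above fills in only the address, length and owner bit, relying on dma_wmb() to order the descriptor body before the ownership handoff:

	data->addr = cpu_to_be64(dma + frame->page_offset);
	dma_wmb();			/* addr visible before byte_count */
	data->byte_count = cpu_to_be32(length);
	...
	dma_wmb();			/* whole WQE visible before owner bit */
	tx_desc->ctrl.owner_opcode = op_own;	/* HW may now consume the WQE */

The doorbell itself is still deferred through *doorbell_pending so several XDP_TX frames can share one MMIO write.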