Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_tx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_tx.c	121
1 file changed, 75 insertions, 46 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 817f4154b86d..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -42,8 +42,9 @@
 #include <linux/tcp.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/moduleparam.h>
 #include <linux/indirect_call_wrapper.h>
+#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
 
 #include "mlx4_en.h"
 
@@ -65,7 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->sp_stride = stride;
-	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+	ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
@@ -77,9 +78,11 @@
 	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
-	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
+	ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
+					GFP_KERNEL, node);
 	if (!ring->bounce_buf) {
-		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+		ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
+					   GFP_KERNEL);
 		if (!ring->bounce_buf) {
 			err = -ENOMEM;
 			goto err_info;
@@ -226,7 +229,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 
 static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
 {
-	return ring->prod - ring->cons > ring->full_size;
+	u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);
+
+	return used > ring->full_size;
 }
 
 static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
@@ -346,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
 			    int napi_mode)
 {
 	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
-	struct mlx4_en_rx_alloc frame = {
-		.page = tx_info->page,
-		.dma = tx_info->map0_dma,
-	};
-
-	if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
-		dma_unmap_page(priv->ddev, tx_info->map0_dma,
-			       PAGE_SIZE, priv->dma_dir);
-		put_page(tx_info->page);
-	}
+	struct page_pool *pool = ring->recycle_ring->pp;
+
+	/* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+	page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
 
 	return tx_info->nr_txbb;
 }
@@ -446,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
 
 	if (unlikely(!priv->port_up))
 		return 0;
+	if (unlikely(!napi_budget) && cq->type == TX_XDP)
+		return 0;
 
 	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
 
@@ -635,19 +636,28 @@ static int get_real_size(const struct sk_buff *skb,
 			 struct net_device *dev,
 			 int *lso_header_size,
 			 bool *inline_ok,
-			 void **pfrag)
+			 void **pfrag,
+			 int *hopbyhop)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int real_size;
 
 	if (shinfo->gso_size) {
 		*inline_ok = false;
-		if (skb->encapsulation)
-			*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
-		else
-			*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		*hopbyhop = 0;
+		if (skb->encapsulation) {
+			*lso_header_size = skb_inner_tcp_all_headers(skb);
+		} else {
+			/* Detects large IPV6 TCP packets and prepares for removal of
+			 * HBH header that has been pushed by ip6_xmit(),
+			 * mainly so that tcpdump can dissect them.
+			 */
+			if (ipv6_has_hopopt_jumbo(skb))
+				*hopbyhop = sizeof(struct hop_jumbo_hdr);
+			*lso_header_size = skb_tcp_all_headers(skb);
+		}
 		real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
-			ALIGN(*lso_header_size + 4, DS_SIZE);
+			ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
 		if (unlikely(*lso_header_size != skb_headlen(skb))) {
 			/* We add a segment for the skb linear buffer only if
 			 * it contains data */
@@ -688,32 +698,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
 		} else {
 			inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
-			memset(((void *)(inl + 1)) + skb->len, 0,
+			memset(inl->data + skb->len, 0,
 			       MIN_PKT_LEN - skb->len);
 		}
-		skb_copy_from_linear_data(skb, inl + 1, hlen);
+		skb_copy_from_linear_data(skb, inl->data, hlen);
 		if (shinfo->nr_frags)
-			memcpy(((void *)(inl + 1)) + hlen, fragptr,
+			memcpy(inl->data + hlen, fragptr,
 			       skb_frag_size(&shinfo->frags[0]));
 	} else {
 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
 		if (hlen <= spc) {
-			skb_copy_from_linear_data(skb, inl + 1, hlen);
+			skb_copy_from_linear_data(skb, inl->data, hlen);
 			if (hlen < spc) {
-				memcpy(((void *)(inl + 1)) + hlen,
+				memcpy(inl->data + hlen,
 				       fragptr, spc - hlen);
 				fragptr +=  spc - hlen;
 			}
-			inl = (void *) (inl + 1) + spc;
-			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+			inl = (void *)inl->data + spc;
+			memcpy(inl->data, fragptr, skb->len - spc);
 		} else {
-			skb_copy_from_linear_data(skb, inl + 1, spc);
-			inl = (void *) (inl + 1) + spc;
-			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+			skb_copy_from_linear_data(skb, inl->data, spc);
+			inl = (void *)inl->data + spc;
+			skb_copy_from_linear_data_offset(skb, spc, inl->data,
 							 hlen - spc);
 			if (shinfo->nr_frags)
-				memcpy(((void *)(inl + 1)) + hlen - spc,
+				memcpy(inl->data + hlen - spc,
 				       fragptr,
 				       skb_frag_size(&shinfo->frags[0]));
 		}
 
@@ -874,6 +884,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int desc_size;
 	int real_size;
 	u32 index, bf_index;
+	struct ipv6hdr *h6;
 	__be32 op_own;
 	int lso_header_size;
 	void *fragptr = NULL;
@@ -882,6 +893,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool stop_queue;
 	bool inline_ok;
 	u8 data_offset;
+	int hopbyhop;
 	bool bf_ok;
 
 	tx_ind = skb_get_queue_mapping(skb);
@@ -891,18 +903,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto tx_drop;
 
 	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
-				  &inline_ok, &fragptr);
+				  &inline_ok, &fragptr, &hopbyhop);
 	if (unlikely(!real_size))
 		goto tx_drop_count;
 
 	/* Align descriptor to TXBB size */
 	desc_size = ALIGN(real_size, TXBB_SIZE);
 	nr_txbb = desc_size >> LOG_TXBB_SIZE;
-	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
-		if (netif_msg_tx_err(priv))
-			en_warn(priv, "Oversized header or SG list\n");
-		goto tx_drop_count;
-	}
 
 	bf_ok = ring->bf_enabled;
 	if (skb_vlan_tag_present(skb)) {
@@ -930,6 +937,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(index + nr_txbb <= ring->size))
 		tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
 	else {
+		if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
+			if (netif_msg_tx_err(priv))
+				en_warn(priv, "Oversized header or SG list\n");
+			goto tx_drop_count;
+		}
 		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
 		bounce = true;
 		bf_ok = false;
@@ -944,7 +956,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		data = &tx_desc->data;
 		data_offset = offsetof(struct mlx4_en_tx_desc, data);
 	} else {
-		int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+		int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
 
 		data = (void *)&tx_desc->lso + lso_align;
 		data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -977,7 +989,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_info->ts_requested = 1;
 	}
 
-	/* Prepare ctrl segement apart opcode+ownership, which depends on
+	/* Prepare ctrl segment apart opcode+ownership, which depends on
 	 * whether LSO is used */
 	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
@@ -1009,14 +1021,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			((ring->prod & ring->size) ?
 				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
 
+		lso_header_size -= hopbyhop;
 		/* Fill in the LSO prefix */
 		tx_desc->lso.mss_hdr_size = cpu_to_be32(
 			shinfo->gso_size << 16 | lso_header_size);
 
-		/* Copy headers;
-		 * note that we already verified that it is linear */
-		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+		if (unlikely(hopbyhop)) {
+			/* remove the HBH header.
+			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+			 */
+			memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
+			h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
+			h6->nexthdr = IPPROTO_TCP;
+			/* Copy the TCP header after the IPv6 one */
+			memcpy(h6 + 1,
+			       skb->data + ETH_HLEN + sizeof(*h6) +
+					sizeof(struct hop_jumbo_hdr),
+			       tcp_hdrlen(skb));
+			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+		} else {
+			/* Copy headers;
+			 * note that we already verified that it is linear
+			 */
+			memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+		}
 		ring->tso_packets++;
 
 		i = shinfo->gso_segs;
 		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
@@ -1053,7 +1082,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
 	}
 
-	ring->prod += nr_txbb;
+	WRITE_ONCE(ring->prod, ring->prod + nr_txbb);
 
 	/* If we used a bounce buffer then copy descriptor back into place */
 	if (unlikely(bounce))
@@ -1162,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
 	tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
 	data = &tx_desc->data;
 
-	dma = frame->dma;
+	dma = page_pool_get_dma_addr(frame->page);
 
 	tx_info->page = frame->page;
 	frame->page = NULL;
@@ -1184,7 +1213,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
 
 	rx_ring->xdp_tx++;
 
-	ring->prod += MLX4_EN_XDP_TX_NRTXBB;
+	WRITE_ONCE(ring->prod, ring->prod + MLX4_EN_XDP_TX_NRTXBB);
 
 	/* Ensure new descriptor hits memory
 	 * before setting ownership of this descriptor to HW
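For context on the BIG TCP part of this diff: ip6_xmit() prefixes IPv6 packets larger than 64KB with a Hop-by-Hop jumbo option mainly so that tcpdump can dissect them, but that option must not reach the NIC's LSO engine, so mlx4_en_xmit() rebuilds the LSO header without it. Below is a minimal userspace sketch of that header rewrite, assuming simplified stand-ins for the kernel's ipv6hdr and hop_jumbo_hdr structs; strip_hbh() is a hypothetical name, not a driver function.

/* Sketch only: wire layout before is [Ethernet][IPv6][HBH jumbo][TCP],
 * the rebuilt LSO header is [Ethernet][IPv6][TCP].
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN    14
#define IPPROTO_TCP 6

struct ipv6hdr {                 /* simplified stand-in, 40 bytes */
	uint8_t  vtc_flow[4];
	uint16_t payload_len;    /* left 0, per the LSO v2 note in the diff */
	uint8_t  nexthdr;
	uint8_t  hop_limit;
	uint8_t  saddr[16];
	uint8_t  daddr[16];
};

struct hop_jumbo_hdr {           /* simplified stand-in, 8 bytes */
	uint8_t  nexthdr;
	uint8_t  hdrlen;
	uint8_t  tlv_type;
	uint8_t  tlv_len;
	uint8_t  jumbo_len[4];
};

/* Rebuild the LSO header without the HBH option, as the driver does. */
static size_t strip_hbh(uint8_t *lso_header, const uint8_t *pkt,
			size_t tcp_hdrlen)
{
	struct ipv6hdr *h6;

	/* Ethernet + IPv6 headers are copied verbatim. */
	memcpy(lso_header, pkt, ETH_HLEN + sizeof(*h6));
	h6 = (struct ipv6hdr *)(lso_header + ETH_HLEN);
	h6->nexthdr = IPPROTO_TCP;	/* IPv6 now points straight at TCP */
	/* Copy the TCP header right after the IPv6 one, skipping the HBH. */
	memcpy(h6 + 1,
	       pkt + ETH_HLEN + sizeof(*h6) + sizeof(struct hop_jumbo_hdr),
	       tcp_hdrlen);
	return ETH_HLEN + sizeof(*h6) + tcp_hdrlen;
}

int main(void)
{
	uint8_t pkt[ETH_HLEN + sizeof(struct ipv6hdr) +
		    sizeof(struct hop_jumbo_hdr) + 20] = { 0 };
	uint8_t lso_header[sizeof(pkt)];
	size_t len;

	/* Mark the packet's IPv6 nexthdr as Hop-by-Hop (protocol 0). */
	((struct ipv6hdr *)(pkt + ETH_HLEN))->nexthdr = 0;
	len = strip_hbh(lso_header, pkt, 20);
	printf("LSO header is %zu bytes (HBH dropped)\n", len);
	return 0;
}

This also explains why lso_header_size and lso_align subtract hopbyhop above: the descriptor only carries the shortened header, while skb->data still holds the original one.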

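The READ_ONCE()/WRITE_ONCE() changes are independent of BIG TCP: ring->prod is advanced by the transmit path while mlx4_en_is_tx_ring_full() reads prod and cons concurrently from other contexts, and the annotations stop the compiler from tearing or re-reading those accesses. A rough single-producer analogue using C11 relaxed atomics, with illustrative names (ring_t, ring_is_full) that are not the driver's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t prod;	/* advanced by the transmit path */
	_Atomic uint32_t cons;	/* advanced by the completion path */
	uint32_t full_size;	/* capacity minus headroom */
} ring_t;

static bool ring_is_full(const ring_t *r)
{
	/* One snapshot of each index, like READ_ONCE() in the diff; the
	 * unsigned subtraction stays correct across index wraparound.
	 */
	uint32_t used = atomic_load_explicit(&r->prod, memory_order_relaxed) -
			atomic_load_explicit(&r->cons, memory_order_relaxed);

	return used > r->full_size;
}

static void ring_produce(ring_t *r, uint32_t nr_txbb)
{
	/* Plain add published with a single store, mirroring
	 * WRITE_ONCE(ring->prod, ring->prod + nr_txbb): no atomic RMW is
	 * needed because only one producer writes prod.
	 */
	atomic_store_explicit(&r->prod,
			      atomic_load_explicit(&r->prod,
						   memory_order_relaxed) + nr_txbb,
			      memory_order_relaxed);
}

int main(void)
{
	ring_t r = { .full_size = 6 };

	ring_produce(&r, 4);
	ring_produce(&r, 4);
	return ring_is_full(&r) ? 0 : 1;	/* 8 used > 6, so full */
}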