Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/en_rx.c')
 -rw-r--r--   drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1177
 1 file changed, 711 insertions(+), 466 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index dec455c8f627..13666d50b90f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -31,7 +31,8 @@ * */ -#include <net/busy_poll.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@ -40,160 +41,57 @@ #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> +#include <linux/irq.h> +#include <linux/skbuff_ref.h> -#include "mlx4_en.h" - -static int mlx4_alloc_pages(struct mlx4_en_priv *priv, - struct mlx4_en_rx_alloc *page_alloc, - const struct mlx4_en_frag_info *frag_info, - gfp_t _gfp) -{ - int order; - struct page *page; - dma_addr_t dma; - - for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { - gfp_t gfp = _gfp; +#include <net/ip.h> +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ip6_checksum.h> +#endif +#include <net/page_pool/helpers.h> - if (order) - gfp |= __GFP_COMP | __GFP_NOWARN; - page = alloc_pages(gfp, order); - if (likely(page)) - break; - if (--order < 0 || - ((PAGE_SIZE << order) < frag_info->frag_size)) - return -ENOMEM; - } - dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, - PCI_DMA_FROMDEVICE); - if (dma_mapping_error(priv->ddev, dma)) { - put_page(page); - return -ENOMEM; - } - page_alloc->size = PAGE_SIZE << order; - page_alloc->page = page; - page_alloc->dma = dma; - page_alloc->offset = frag_info->frag_align; - /* Not doing get_page() for each frag is a big win - * on asymetric workloads. - */ - atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); - return 0; -} +#include "mlx4_en.h" static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, - struct mlx4_en_rx_alloc *ring_alloc, gfp_t gfp) { - struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; - const struct mlx4_en_frag_info *frag_info; - struct page *page; dma_addr_t dma; int i; - for (i = 0; i < priv->num_frags; i++) { - frag_info = &priv->frag_info[i]; - page_alloc[i] = ring_alloc[i]; - page_alloc[i].offset += frag_info->frag_stride; - if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) - continue; - if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) - goto out; - } - - for (i = 0; i < priv->num_frags; i++) { - frags[i] = ring_alloc[i]; - dma = ring_alloc[i].dma + ring_alloc[i].offset; - ring_alloc[i] = page_alloc[i]; - rx_desc->data[i].addr = cpu_to_be64(dma); - } - - return 0; + for (i = 0; i < priv->num_frags; i++, frags++) { + if (!frags->page) { + frags->page = page_pool_alloc_pages(ring->pp, gfp); + if (!frags->page) { + ring->alloc_fail++; + return -ENOMEM; + } + page_pool_fragment_page(frags->page, 1); + frags->page_offset = priv->rx_headroom; -out: - while (i--) { - frag_info = &priv->frag_info[i]; - if (page_alloc[i].page != ring_alloc[i].page) { - dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].size, PCI_DMA_FROMDEVICE); - page = page_alloc[i].page; - atomic_set(&page->_count, 1); - put_page(page); + ring->rx_alloc_pages++; } - } - return -ENOMEM; -} - -static void mlx4_en_free_frag(struct mlx4_en_priv *priv, - struct mlx4_en_rx_alloc *frags, - int i) -{ - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - - if (frags[i].offset + frag_info->frag_stride > frags[i].size) - dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, - PCI_DMA_FROMDEVICE); - - if 
(frags[i].page) - put_page(frags[i].page); -} - -static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring) -{ - int i; - struct mlx4_en_rx_alloc *page_alloc; - - for (i = 0; i < priv->num_frags; i++) { - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - - if (mlx4_alloc_pages(priv, &ring->page_alloc[i], - frag_info, GFP_KERNEL)) - goto out; + dma = page_pool_get_dma_addr(frags->page); + rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset); } return 0; - -out: - while (i--) { - struct page *page; - - page_alloc = &ring->page_alloc[i]; - dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->size, PCI_DMA_FROMDEVICE); - page = page_alloc->page; - atomic_set(&page->_count, 1); - put_page(page); - page_alloc->page = NULL; - } - return -ENOMEM; } -static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring) +static void mlx4_en_free_frag(const struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frag) { - struct mlx4_en_rx_alloc *page_alloc; - int i; - - for (i = 0; i < priv->num_frags; i++) { - const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - - page_alloc = &ring->page_alloc[i]; - en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", - i, page_count(page_alloc->page)); - - dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->size, PCI_DMA_FROMDEVICE); - while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { - put_page(page_alloc->page); - page_alloc->offset += frag_info->frag_stride; - } - page_alloc->page = NULL; - } + if (frag->page) + page_pool_put_full_page(ring->pp, frag->page, false); + /* We need to clear all fields, otherwise a change of priv->log_rx_info + * could lead to see garbage later in frag->page. 
+ */ + memset(frag, 0, sizeof(*frag)); } -static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, +static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; @@ -222,11 +120,17 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index, gfp_t gfp) { - struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); + struct mlx4_en_rx_desc *rx_desc = ring->buf + + (index << ring->log_stride); struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); - return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); + return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp); +} + +static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring) +{ + return ring->prod == ring->cons; } static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) @@ -234,7 +138,8 @@ static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); } -static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, +/* slow path */ +static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) { @@ -244,10 +149,11 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, frags = ring->rx_info + (index << priv->log_rx_info); for (nr = 0; nr < priv->num_frags; nr++) { en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); - mlx4_en_free_frag(priv, frags, nr); + mlx4_en_free_frag(priv, ring, frags + nr); } } +/* Function not in fast-path */ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) { struct mlx4_en_rx_ring *ring; @@ -257,19 +163,17 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size, GFP_KERNEL)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { - en_err(priv, "Failed to allocate " - "enough rx buffers\n"); + en_err(priv, "Failed to allocate enough rx buffers\n"); return -ENOMEM; } else { new_size = rounddown_pow_of_two(ring->actual_size); - en_warn(priv, "Only %d buffers allocated " - "reducing ring size to %d", + en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n", ring->actual_size, new_size); goto reduce_rings; } @@ -282,7 +186,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) reduce_rings: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; while (ring->actual_size > new_size) { ring->actual_size--; ring->prod--; @@ -302,22 +206,50 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, ring->cons, ring->prod); /* Unmap and free Rx buffers */ - BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); - while (ring->cons != ring->prod) { - index = ring->cons & ring->size_mask; + for (index = 0; index < ring->size; index++) { en_dbg(DRV, priv, "Processing descriptor:%d\n", index); mlx4_en_free_rx_desc(priv, ring, index); - ++ring->cons; + } + ring->cons = 0; + ring->prod = 0; +} + +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) +{ + int i; + int num_of_eqs; + int num_rx_rings; + struct mlx4_dev *dev = mdev->dev; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + num_of_eqs = 
max_t(int, MIN_RX_RINGS, + min_t(int, + mlx4_get_eqs_per_port(mdev->dev, i), + DEF_RX_RINGS)); + + num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : + min_t(int, num_of_eqs, num_online_cpus()); + mdev->profile.prof[i].rx_ring_num = + rounddown_pow_of_two(num_rx_rings); } } int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, u32 size, u16 stride) + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node, int queue_index) { struct mlx4_en_dev *mdev = priv->mdev; + struct page_pool_params pp = {}; + struct mlx4_en_rx_ring *ring; int err = -ENOMEM; int tmp; + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + en_err(priv, "Failed to allocate RX ring structure\n"); + return -ENOMEM; + } + ring->prod = 0; ring->cons = 0; ring->size = size; @@ -326,36 +258,64 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride + TXBB_SIZE; + pp.flags = PP_FLAG_DMA_MAP; + pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE); + pp.nid = node; + pp.napi = &priv->rx_cq[queue_index]->napi; + pp.netdev = priv->dev; + pp.dev = &mdev->dev->persist->pdev->dev; + pp.dma_dir = priv->dma_dir; + + ring->pp = page_pool_create(&pp); + if (IS_ERR(ring->pp)) { + err = PTR_ERR(ring->pp); + goto err_ring; + } + + if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0) + goto err_pp; + + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL, + ring->pp); + if (err) + goto err_xdp_info; + tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); - ring->rx_info = vmalloc(tmp); - if (!ring->rx_info) - return -ENOMEM; + ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node); + if (!ring->rx_info) { + err = -ENOMEM; + goto err_xdp_info; + } en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", ring->rx_info, tmp); - err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, - ring->buf_size, 2 * PAGE_SIZE); + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) - goto err_ring; + goto err_info; - err = mlx4_en_map_buffer(&ring->wqres.buf); - if (err) { - en_err(priv, "Failed to map RX buffer\n"); - goto err_hwq; - } ring->buf = ring->wqres.buf.direct.buf; ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; + *pring = ring; return 0; -err_hwq: - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); -err_ring: - vfree(ring->rx_info); +err_info: + kvfree(ring->rx_info); ring->rx_info = NULL; +err_xdp_info: + xdp_rxq_info_unreg(&ring->xdp_rxq); +err_pp: + page_pool_destroy(ring->pp); +err_ring: + kfree(ring); + *pring = NULL; + return err; } @@ -369,16 +329,22 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) DS_SIZE * priv->num_frags); for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; ring->prod = 0; ring->cons = 0; ring->actual_size = 0; - ring->cqn = priv->rx_cq[ring_ind].mcq.cqn; + ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; ring->stride = stride; - if (ring->stride <= TXBB_SIZE) + if (ring->stride <= TXBB_SIZE) { + /* Stamp first unused send wqe */ + __be32 *ptr = (__be32 *)ring->buf; + __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT); + *ptr = stamp; + /* Move pointer to start of rx section */ ring->buf += 
TXBB_SIZE; + } ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride; @@ -389,23 +355,13 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) /* Initialize all descriptors */ for (i = 0; i < ring->size; i++) mlx4_en_init_rx_desc(priv, ring, i); - - /* Initialize page allocators */ - err = mlx4_en_init_allocator(priv, ring); - if (err) { - en_err(priv, "Failed initializing ring allocator\n"); - if (ring->stride <= TXBB_SIZE) - ring->buf -= TXBB_SIZE; - ring_ind--; - goto err_allocator; - } } err = mlx4_en_fill_rx_buffers(priv); if (err) goto err_buffers; for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { - ring = &priv->rx_ring[ring_ind]; + ring = priv->rx_ring[ring_ind]; ring->size_mask = ring->actual_size - 1; mlx4_en_update_rx_prod_db(ring); @@ -415,31 +371,57 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) err_buffers: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) - mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]); + mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]); ring_ind = priv->rx_ring_num - 1; -err_allocator: while (ring_ind >= 0) { - if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE) - priv->rx_ring[ring_ind].buf -= TXBB_SIZE; - mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); + if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE) + priv->rx_ring[ring_ind]->buf -= TXBB_SIZE; ring_ind--; } return err; } +/* We recover from out of memory by scheduling our napi poll + * function (mlx4_en_process_cq), which tries to allocate + * all missing RX buffers (call to mlx4_en_refill_rx_buffers). + */ +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) +{ + int ring; + + if (!priv->port_up) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) { + local_bh_disable(); + napi_schedule(&priv->rx_cq[ring]->napi); + local_bh_enable(); + } + } +} + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, u32 size, u16 stride) + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride) { struct mlx4_en_dev *mdev = priv->mdev; - - mlx4_en_unmap_buffer(&ring->wqres.buf); + struct mlx4_en_rx_ring *ring = *pring; + struct bpf_prog *old_prog; + + old_prog = rcu_dereference_protected( + ring->xdp_prog, + lockdep_is_held(&mdev->state_lock)); + if (old_prog) + bpf_prog_put(old_prog); + xdp_rxq_info_unreg(&ring->xdp_rxq); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); - vfree(ring->rx_info); + kvfree(ring->rx_info); + page_pool_destroy(ring->pp); ring->rx_info = NULL; -#ifdef CONFIG_RFS_ACCEL - mlx4_en_cleanup_filters(priv, ring); -#endif + kfree(ring); + *pring = NULL; } void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, @@ -448,183 +430,312 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; - mlx4_en_destroy_allocator(priv, ring); } static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, - struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, struct sk_buff *skb, int length) { - struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags; - struct mlx4_en_frag_info *frag_info; - int nr; + const struct mlx4_en_frag_info *frag_info = priv->frag_info; + unsigned int truesize = 0; + bool release = true; + int nr, frag_size; + struct page *page; dma_addr_t dma; /* Collect used fragments while replacing them in the HW descriptors */ - for (nr = 0; nr < priv->num_frags; nr++) { - 
frag_info = &priv->frag_info[nr]; - if (length <= frag_info->frag_prefix_size) - break; - if (!frags[nr].page) + for (nr = 0;; frags++) { + frag_size = min_t(int, length, frag_info->frag_size); + + page = frags->page; + if (unlikely(!page)) goto fail; - dma = be64_to_cpu(rx_desc->data[nr].addr); - dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size, - DMA_FROM_DEVICE); + dma = page_pool_get_dma_addr(page); + dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset, + frag_size, priv->dma_dir); + + __skb_fill_page_desc(skb, nr, page, frags->page_offset, + frag_size); + + truesize += frag_info->frag_stride; + if (frag_info->frag_stride == PAGE_SIZE / 2) { + struct netmem_desc *desc = pp_page_to_nmdesc(page); + + frags->page_offset ^= PAGE_SIZE / 2; + release = page_count(page) != 1 || + atomic_long_read(&desc->pp_ref_count) != 1 || + page_is_pfmemalloc(page) || + page_to_nid(page) != numa_mem_id(); + } else if (!priv->rx_headroom) { + /* rx_headroom for non XDP setup is always 0. + * When XDP is set, the above condition will + * guarantee page is always released. + */ + u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES); + + frags->page_offset += sz_align; + release = frags->page_offset + frag_info->frag_size > PAGE_SIZE; + } + if (release) { + frags->page = NULL; + } else { + page_pool_ref_page(page); + } - /* Save page reference in skb */ - __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); - skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); - skb_frags_rx[nr].page_offset = frags[nr].offset; - skb->truesize += frag_info->frag_stride; - frags[nr].page = NULL; + nr++; + length -= frag_size; + if (!length) + break; + frag_info++; } - /* Adjust size of last fragment to match actual length */ - if (nr > 0) - skb_frag_size_set(&skb_frags_rx[nr - 1], - length - priv->frag_info[nr - 1].frag_prefix_size); + skb->truesize += truesize; return nr; fail: while (nr > 0) { nr--; - __skb_frag_unref(&skb_frags_rx[nr]); + __skb_frag_unref(skb_shinfo(skb)->frags + nr, false); } return 0; } - -static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, - struct mlx4_en_rx_desc *rx_desc, - struct mlx4_en_rx_alloc *frags, - unsigned int length) +static void validate_loopback(struct mlx4_en_priv *priv, void *va) { - struct sk_buff *skb; - void *va; - int used_frags; - dma_addr_t dma; + const unsigned char *data = va + ETH_HLEN; + int i; - skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN); - if (!skb) { - en_dbg(RX_ERR, priv, "Failed allocating skb\n"); - return NULL; + for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) { + if (data[i] != (unsigned char)i) + return; } - skb_reserve(skb, NET_IP_ALIGN); - skb->len = length; - - /* Get pointer to first fragment so we could copy the headers into the - * (linear part of the) skb */ - va = page_address(frags[0].page) + frags[0].offset; - - if (length <= SMALL_PACKET_SIZE) { - /* We are copying all relevant data to the skb - temporarily - * sync buffers for the copy */ - dma = be64_to_cpu(rx_desc->data[0].addr); - dma_sync_single_for_cpu(priv->ddev, dma, length, - DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, va, length); - skb->tail += length; - } else { - /* Move relevant fragments to skb */ - used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, - skb, length); - if (unlikely(!used_frags)) { - kfree_skb(skb); - return NULL; - } - skb_shinfo(skb)->nr_frags = used_frags; + /* Loopback found */ + priv->loopback_ok = 1; +} - /* Copy headers into the skb linear buffer */ - memcpy(skb->data, va, 
HEADER_COPY_SIZE); - skb->tail += HEADER_COPY_SIZE; +static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + u32 missing = ring->actual_size - (ring->prod - ring->cons); + + /* Try to batch allocations, but not too much. */ + if (missing < 8) + return; + do { + if (mlx4_en_prepare_rx_desc(priv, ring, + ring->prod & ring->size_mask, + GFP_ATOMIC | __GFP_MEMALLOC)) + break; + ring->prod++; + } while (likely(--missing)); - /* Skip headers in first fragment */ - skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; + mlx4_en_update_rx_prod_db(ring); +} - /* Adjust size of first fragment */ - skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE); - skb->data_len = length - HEADER_COPY_SIZE; - } - return skb; +/* When hardware doesn't strip the vlan, we need to calculate the checksum + * over it and add it to the hardware's checksum calculation + */ +static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, + struct vlan_hdr *vlanh) +{ + return csum_add(hw_checksum, *(__wsum *)vlanh); } -static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb) +/* Although the stack expects checksum which doesn't include the pseudo + * header, the HW adds it. To address that, we are subtracting the pseudo + * header checksum from the checksum value provided by the HW. + */ +static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, + struct iphdr *iph) { - int i; - int offset = ETH_HLEN; + __u16 length_for_csum = 0; + __wsum csum_pseudo_header = 0; + __u8 ipproto = iph->protocol; + + if (unlikely(ipproto == IPPROTO_SCTP)) + return -1; + + length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); + csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, + length_for_csum, ipproto, 0); + skb->csum = csum_sub(hw_checksum, csum_pseudo_header); + return 0; +} + +#if IS_ENABLED(CONFIG_IPV6) +/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header: + * 4 first bytes : priority, version, flow_lbl + * and 2 additional bytes : nexthdr, hop_limit. + */ +static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, + struct ipv6hdr *ipv6h) +{ + __u8 nexthdr = ipv6h->nexthdr; + __wsum temp; + + if (unlikely(nexthdr == IPPROTO_FRAGMENT || + nexthdr == IPPROTO_HOPOPTS || + nexthdr == IPPROTO_SCTP)) + return -1; + + /* priority, version, flow_lbl */ + temp = csum_add(hw_checksum, *(__wsum *)ipv6h); + /* nexthdr and hop_limit */ + skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr); + return 0; +} +#endif - for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { - if (*(skb->data + offset) != (unsigned char) (i & 0xff)) - goto out_loopback; +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + +/* We reach this function only after checking that any of + * the (IPv4 | IPv6) bits are set in cqe->status. + */ +static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, + netdev_features_t dev_features) +{ + __wsum hw_checksum = 0; + void *hdr; + + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to skip + * checksum complete. 
+ */ + if (short_frame(skb->len)) + return -EINVAL; + + hdr = (u8 *)va + sizeof(struct ethhdr); + hw_checksum = csum_unfold((__force __sum16)cqe->checksum); + + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && + !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { + hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); + hdr += sizeof(struct vlan_hdr); } - /* Loopback found */ - priv->loopback_ok = 1; -out_loopback: - dev_kfree_skb_any(skb); +#if IS_ENABLED(CONFIG_IPV6) + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + return get_fixed_ipv6_csum(hw_checksum, skb, hdr); +#endif + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); } -static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring) +#if IS_ENABLED(CONFIG_IPV6) +#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6) +#else +#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4) +#endif + +struct mlx4_en_xdp_buff { + struct xdp_buff xdp; + struct mlx4_cqe *cqe; + struct mlx4_en_dev *mdev; + struct mlx4_en_rx_ring *ring; + struct net_device *dev; +}; + +int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) { - int index = ring->prod & ring->size_mask; + struct mlx4_en_xdp_buff *_ctx = (void *)ctx; - while ((u32) (ring->prod - ring->cons) < ring->actual_size) { - if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC)) - break; - ring->prod++; - index = ring->prod & ring->size_mask; + if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL)) + return -ENODATA; + + *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev, + mlx4_en_get_cqe_ts(_ctx->cqe)); + return 0; +} + +int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + struct mlx4_en_xdp_buff *_ctx = (void *)ctx; + struct mlx4_cqe *cqe = _ctx->cqe; + enum xdp_rss_hash_type xht = 0; + __be16 status; + + if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH))) + return -ENODATA; + + *hash = be32_to_cpu(cqe->immed_rss_invalid); + status = cqe->status; + if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP)) + xht = XDP_RSS_L4_TCP; + if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP)) + xht = XDP_RSS_L4_UDP; + if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F)) + xht |= XDP_RSS_L3_IPV4; + if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) { + xht |= XDP_RSS_L3_IPV6; + if (cqe->ipv6_ext_mask) + xht |= XDP_RSS_L3_DYNHDR; } + *rss_type = xht; + + return 0; } int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_xdp_buff mxbuf = {}; + int factor = priv->cqe_factor; + struct mlx4_en_rx_ring *ring; + struct bpf_prog *xdp_prog; + int cq_ring = cq->ring; + bool doorbell_pending; + bool xdp_redir_flush; struct mlx4_cqe *cqe; - struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; - struct mlx4_en_rx_alloc *frags; - struct mlx4_en_rx_desc *rx_desc; - struct sk_buff *skb; - int index; - int nr; - unsigned int length; int polled = 0; - int ip_summed; - int factor = priv->cqe_factor; - u64 timestamp; + int index; - if (!priv->port_up) + if (unlikely(!priv->port_up || budget <= 0)) return 0; + ring = priv->rx_ring[cq_ring]; + + xdp_prog = rcu_dereference_bh(ring->xdp_prog); + xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq); + doorbell_pending = false; + xdp_redir_flush = false; + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE 
index instead of * reading 'cqe->index' */ index = cq->mcq.cons_index & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cq->mcq.cons_index & cq->size)) { + struct mlx4_en_rx_alloc *frags; + enum pkt_hash_types hash_type; + struct sk_buff *skb; + unsigned int length; + int ip_summed; + void *va; + int nr; frags = ring->rx_info + (index << priv->log_rx_info); - rx_desc = ring->buf + (index << ring->log_stride); - + va = page_address(frags[0].page) + frags[0].page_offset; + net_prefetchw(va); /* * make sure we read the CQE after we read the ownership bit */ - rmb(); + dma_rmb(); /* Drop packet on bad receive or bad checksum */ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { - en_err(priv, "CQE completed in error - vendor " - "syndrom:%d syndrom:%d\n", - ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, - ((struct mlx4_err_cqe *) cqe)->syndrome); + en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n", + ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, + ((struct mlx4_err_cqe *)cqe)->syndrome); goto next; } if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { @@ -636,16 +747,15 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * and not performing the selftest or flb disabled */ if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) { - struct ethhdr *ethh; + const struct ethhdr *ethh = va; dma_addr_t dma; /* Get pointer to first fragment since we haven't * skb yet and cast it to ethhdr struct */ - dma = be64_to_cpu(rx_desc->data[0].addr); + dma = page_pool_get_dma_addr(frags[0].page); + dma += frags[0].page_offset; dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), DMA_FROM_DEVICE); - ethh = (struct ethhdr *)(page_address(frags[0].page) + - frags[0].offset); if (is_multicast_ether_addr(ethh->h_dest)) { struct mlx4_mac_entry *entry; @@ -655,141 +765,193 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Drop the packet, since HW loopback-ed it */ mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX]; bucket = &priv->mac_hash[mac_hash]; - rcu_read_lock(); - hlist_for_each_entry_rcu(entry, bucket, hlist) { + hlist_for_each_entry_rcu_bh(entry, bucket, hlist) { if (ether_addr_equal_64bits(entry->mac, - ethh->h_source)) { - rcu_read_unlock(); + ethh->h_source)) goto next; - } } - rcu_read_unlock(); } } + if (unlikely(priv->validate_loopback)) { + validate_loopback(priv, va); + goto next; + } + /* * Packet is OK - process it. */ length = be32_to_cpu(cqe->byte_cnt); length -= ring->fcs_del; - ring->bytes += length; - ring->packets++; - if (likely(dev->features & NETIF_F_RXCSUM)) { - if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && - (cqe->checksum == cpu_to_be16(0xffff))) { - ring->csum_ok++; - /* This packet is eligible for GRO if it is: - * - DIX Ethernet (type interpretation) - * - TCP/IP (v4) - * - without IP options - * - not an IP fragment - * - no LLS polling in progress - */ - if (!mlx4_en_cq_ll_polling(cq) && - (dev->features & NETIF_F_GRO)) { - struct sk_buff *gro_skb = napi_get_frags(&cq->napi); - if (!gro_skb) - goto next; - - nr = mlx4_en_complete_rx_desc(priv, - rx_desc, frags, gro_skb, - length); - if (!nr) - goto next; + /* A bpf program gets first chance to drop the packet. It may + * read bytes but not past the end of the frag. 
+ */ + if (xdp_prog) { + dma_addr_t dma; + void *orig_data; + u32 act; - skb_shinfo(gro_skb)->nr_frags = nr; - gro_skb->len = length; - gro_skb->data_len = length; - gro_skb->ip_summed = CHECKSUM_UNNECESSARY; + dma = page_pool_get_dma_addr(frags[0].page); + dma += frags[0].page_offset; + dma_sync_single_for_cpu(priv->ddev, dma, + priv->frag_info[0].frag_size, + DMA_FROM_DEVICE); - if ((cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && - (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - u16 vid = be16_to_cpu(cqe->sl_vid); + xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset, + frags[0].page_offset, length, true); + orig_data = mxbuf.xdp.data; + mxbuf.cqe = cqe; + mxbuf.mdev = priv->mdev; + mxbuf.ring = ring; + mxbuf.dev = dev; + + act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp); + + length = mxbuf.xdp.data_end - mxbuf.xdp.data; + if (mxbuf.xdp.data != orig_data) { + frags[0].page_offset = mxbuf.xdp.data - + mxbuf.xdp.data_hard_start; + va = mxbuf.xdp.data; + } - __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); - } + switch (act) { + case XDP_PASS: + break; + case XDP_REDIRECT: + if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) { + ring->xdp_redirect++; + xdp_redir_flush = true; + frags[0].page = NULL; + goto next; + } + ring->xdp_redirect_fail++; + trace_xdp_exception(dev, xdp_prog, act); + goto xdp_drop_no_cnt; + case XDP_TX: + if (likely(!mlx4_en_xmit_frame(ring, frags, priv, + length, cq_ring, + &doorbell_pending))) { + frags[0].page = NULL; + goto next; + } + trace_xdp_exception(dev, xdp_prog, act); + goto xdp_drop_no_cnt; /* Drop on xmit failure */ + default: + bpf_warn_invalid_xdp_action(dev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); + fallthrough; + case XDP_DROP: + ring->xdp_drop++; +xdp_drop_no_cnt: + goto next; + } + } - if (dev->features & NETIF_F_RXHASH) - gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); + ring->bytes += length; + ring->packets++; - skb_record_rx_queue(gro_skb, cq->ring); + skb = napi_get_frags(&cq->napi); + if (unlikely(!skb)) + goto next; + skb_mark_for_recycle(skb); - if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { - timestamp = mlx4_en_get_cqe_ts(cqe); - mlx4_en_fill_hwtstamps(mdev, - skb_hwtstamps(gro_skb), - timestamp); - } + if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) { + u64 timestamp = mlx4_en_get_cqe_ts(cqe); - napi_gro_frags(&cq->napi); - goto next; - } + mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb), + timestamp); + } + skb_record_rx_queue(skb, cq_ring); - /* GRO not possible, complete processing here */ + if (likely(dev->features & NETIF_F_RXCSUM)) { + /* TODO: For IP non TCP/UDP packets when csum complete is + * not an option (not supported or any other reason) we can + * actually check cqe IPOK status bit and report + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE + */ + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff)) { + bool l2_tunnel; + + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); ip_summed = CHECKSUM_UNNECESSARY; + hash_type = PKT_HASH_TYPE_L4; + if (l2_tunnel) + skb->csum_level = 1; + ring->csum_ok++; } else { - ip_summed = CHECKSUM_NONE; - ring->csum_none++; + if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY)))) + goto csum_none; + if (check_csum(cqe, skb, va, 
dev->features)) + goto csum_none; + ip_summed = CHECKSUM_COMPLETE; + hash_type = PKT_HASH_TYPE_L3; + ring->csum_complete++; } } else { +csum_none: ip_summed = CHECKSUM_NONE; + hash_type = PKT_HASH_TYPE_L3; ring->csum_none++; } - - skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); - if (!skb) { - priv->stats.rx_dropped++; - goto next; - } - - if (unlikely(priv->validate_loopback)) { - validate_loopback(priv, skb); - goto next; - } - skb->ip_summed = ip_summed; - skb->protocol = eth_type_trans(skb, dev); - skb_record_rx_queue(skb, cq->ring); - if (dev->features & NETIF_F_RXHASH) - skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); + skb_set_hash(skb, + be32_to_cpu(cqe->immed_rss_invalid), + hash_type); - if ((be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_VLAN_PRESENT_MASK) && + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); - - if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { - timestamp = mlx4_en_get_cqe_ts(cqe); - mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), - timestamp); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(cqe->sl_vid)); + else if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) && + (dev->features & NETIF_F_HW_VLAN_STAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + be16_to_cpu(cqe->sl_vid)); + + nr = mlx4_en_complete_rx_desc(priv, frags, skb, length); + if (likely(nr)) { + skb_shinfo(skb)->nr_frags = nr; + skb->len = length; + skb->data_len = length; + napi_gro_frags(&cq->napi); + } else { + __vlan_hwaccel_clear_tag(skb); + skb_clear_hash(skb); } - - skb_mark_napi_id(skb, &cq->napi); - - /* Push it up the stack */ - netif_receive_skb(skb); - next: - for (nr = 0; nr < priv->num_frags; nr++) - mlx4_en_free_frag(priv, frags, nr); - ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; - if (++polled == budget) - goto out; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; + if (unlikely(++polled == budget)) + break; + } + + if (xdp_redir_flush) + xdp_do_flush(); + + if (likely(polled)) { + if (doorbell_pending) { + priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true; + mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]); + } + + mlx4_cq_set_ci(&cq->mcq); + wmb(); /* ensure HW sees CQ consumer before we post new buffers */ + ring->cons = cq->mcq.cons_index; } -out: - AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); - mlx4_cq_set_ci(&cq->mcq); - wmb(); /* ensure HW sees CQ consumer before we post new buffers */ - ring->cons = cq->mcq.cons_index; mlx4_en_refill_rx_buffers(priv, ring); - mlx4_en_update_rx_prod_db(ring); + return polled; } @@ -799,8 +961,8 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq) struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - if (priv->port_up) - napi_schedule(&cq->napi); + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); else mlx4_en_arm_cq(priv, cq); } @@ -811,71 +973,112 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_cq *xdp_tx_cq = NULL; + bool clean_complete = true; int done; - if (!mlx4_en_cq_lock_napi(cq)) - return budget; + if (!budget) + return 0; - done = mlx4_en_process_rx_cq(dev, cq, budget); + if 
(priv->tx_ring_num[TX_XDP]) { + xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring]; + if (xdp_tx_cq->xdp_busy) { + clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq, + budget) < budget; + xdp_tx_cq->xdp_busy = !clean_complete; + } + } - mlx4_en_cq_unlock_napi(cq); + done = mlx4_en_process_rx_cq(dev, cq, budget); /* If we used up all the quota - we're probably not done yet... */ - if (done == budget) - INC_PERF_COUNTER(priv->pstats.napi_quota); - else { - /* Done for now */ - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); + if (done == budget || !clean_complete) { + int cpu_curr; + + /* in case we got here because of !clean_complete */ + done = budget; + + cpu_curr = smp_processor_id(); + + if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask))) + return budget; + + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. Need to stop this NAPI + * poll, and restart it on the right CPU. + * Try to avoid returning a too small value (like 0), + * to not fool net_rx_action() and its netdev_budget + */ + if (done) + done--; } + /* Done for now */ + if (likely(napi_complete_done(napi, done))) + mlx4_en_arm_cq(priv, cq); return done; } -static const int frag_sizes[] = { - FRAG_SZ0, - FRAG_SZ1, - FRAG_SZ2, - FRAG_SZ3 -}; - void mlx4_en_calc_rx_buf(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE; - int buf_size = 0; + int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu); int i = 0; - while (buf_size < eff_mtu) { - priv->frag_info[i].frag_size = - (eff_mtu > buf_size + frag_sizes[i]) ? - frag_sizes[i] : eff_mtu - buf_size; - priv->frag_info[i].frag_prefix_size = buf_size; - if (!i) { - priv->frag_info[i].frag_align = NET_IP_ALIGN; - priv->frag_info[i].frag_stride = - ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES); - } else { - priv->frag_info[i].frag_align = 0; - priv->frag_info[i].frag_stride = - ALIGN(frag_sizes[i], SMP_CACHE_BYTES); + /* bpf requires buffers to be set up as 1 packet per page. + * This only works when num_frags == 1. + */ + if (priv->tx_ring_num[TX_XDP]) { + priv->frag_info[0].frag_size = eff_mtu; + /* This will gain efficient xdp frame recycling at the + * expense of more costly truesize accounting + */ + priv->frag_info[0].frag_stride = PAGE_SIZE; + priv->dma_dir = DMA_BIDIRECTIONAL; + priv->rx_headroom = XDP_PACKET_HEADROOM; + i = 1; + } else { + int frag_size_max = 2048, buf_size = 0; + + /* should not happen, right ? 
*/ + if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048) + frag_size_max = PAGE_SIZE; + + while (buf_size < eff_mtu) { + int frag_stride, frag_size = eff_mtu - buf_size; + int pad, nb; + + if (i < MLX4_EN_MAX_RX_FRAGS - 1) + frag_size = min(frag_size, frag_size_max); + + priv->frag_info[i].frag_size = frag_size; + frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES); + /* We can only pack 2 1536-bytes frames in on 4K page + * Therefore, each frame would consume more bytes (truesize) + */ + nb = PAGE_SIZE / frag_stride; + pad = (PAGE_SIZE - nb * frag_stride) / nb; + pad &= ~(SMP_CACHE_BYTES - 1); + priv->frag_info[i].frag_stride = frag_stride + pad; + + buf_size += frag_size; + i++; } - buf_size += priv->frag_info[i].frag_size; - i++; + priv->dma_dir = DMA_FROM_DEVICE; + priv->rx_headroom = 0; } priv->num_frags = i; priv->rx_skb_size = eff_mtu; priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); - en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " - "num_frags:%d):\n", eff_mtu, priv->num_frags); + en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n", + eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { - en_err(priv, - " frag:%d - size:%d prefix:%d align:%d stride:%d\n", + en_dbg(DRV, + priv, + " frag:%d - size:%d stride:%d\n", i, priv->frag_info[i].frag_size, - priv->frag_info[i].frag_prefix_size, - priv->frag_info[i].frag_align, priv->frag_info[i].frag_stride); } } @@ -891,7 +1094,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, struct mlx4_qp_context *context; int err = 0; - context = kmalloc(sizeof(*context), GFP_KERNEL); + context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) return -ENOMEM; @@ -902,7 +1105,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, } qp->event = mlx4_en_sqp_event; - memset(context, 0, sizeof *context); mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, qpn, ring->cqn, -1, context); context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); @@ -910,7 +1112,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, /* Cancel FCS removal if FW allows */ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { context->param3 |= cpu_to_be32(1 << 29); - ring->fcs_del = ETH_FCS_LEN; + if (priv->dev->features & NETIF_F_RXFCS) + ring->fcs_del = 0; + else + ring->fcs_del = ETH_FCS_LEN; } else ring->fcs_del = 0; @@ -930,7 +1135,9 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) int err; u32 qpn; - err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn); + err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, + MLX4_RESERVE_A0_QP, + MLX4_RES_USAGE_DRIVER); if (err) { en_err(priv, "Failed reserving drop qpn\n"); return err; @@ -969,14 +1176,15 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) int i, qpn; int err = 0; int good_qps = 0; - static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC, - 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD, - 0x593D56D9, 0xF3253C06, 0x2ADC1FFC}; + u8 flags; en_dbg(DRV, priv, "Configuring rss steering\n"); + + flags = priv->rx_ring_num == 1 ? 
MLX4_RESERVE_A0_QP : 0; err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, priv->rx_ring_num, - &rss_map->base_qpn); + &rss_map->base_qpn, flags, + MLX4_RES_USAGE_DRIVER); if (err) { en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); return err; @@ -984,7 +1192,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) for (i = 0; i < priv->rx_ring_num; i++) { qpn = rss_map->base_qpn + i; - err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i], + err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i], &rss_map->state[i], &rss_map->qps[i]); if (err) @@ -993,15 +1201,29 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) ++good_qps; } + if (priv->rx_ring_num == 1) { + rss_map->indir_qp = &rss_map->qps[0]; + priv->base_qpn = rss_map->indir_qp->qpn; + en_info(priv, "Optimized Non-RSS steering\n"); + return 0; + } + + rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL); + if (!rss_map->indir_qp) { + err = -ENOMEM; + goto rss_err; + } + /* Configure RSS indirection qp */ - err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp); + err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); - goto rss_err; + goto qp_alloc_err; } - rss_map->indir_qp.event = mlx4_en_sqp_event; + + rss_map->indir_qp->event = mlx4_en_sqp_event; mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, - priv->rx_ring[0].cqn, -1, &context); + priv->rx_ring[0]->cqn, -1, &context); if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) rss_rings = priv->rx_ring_num; @@ -1018,13 +1240,28 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; rss_context->base_qpn_udp = rss_context->default_qpn; } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n"); + rss_mask |= MLX4_RSS_BY_INNER_HEADERS; + } + rss_context->flags = rss_mask; rss_context->hash_fn = MLX4_RSS_HASH_TOP; - for (i = 0; i < 10; i++) - rss_context->rss_key[i] = cpu_to_be32(rsskey[i]); + if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) { + rss_context->hash_fn = MLX4_RSS_HASH_XOR; + } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) { + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + memcpy(rss_context->rss_key, priv->rss_key, + MLX4_EN_RSS_KEY_SIZE); + } else { + en_err(priv, "Unknown RSS hash function requested\n"); + err = -EINVAL; + goto indir_err; + } err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, - &rss_map->indir_qp, &rss_map->indir_state); + rss_map->indir_qp, &rss_map->indir_state); if (err) goto indir_err; @@ -1032,9 +1269,12 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) indir_err: mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, - MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); - mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); - mlx4_qp_free(mdev->dev, &rss_map->indir_qp); + MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); + mlx4_qp_remove(mdev->dev, rss_map->indir_qp); + mlx4_qp_free(mdev->dev, rss_map->indir_qp); +qp_alloc_err: + kfree(rss_map->indir_qp); + rss_map->indir_qp = NULL; rss_err: for (i = 0; i < good_qps; i++) { mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], @@ -1052,10 +1292,15 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) struct mlx4_en_rss_map *rss_map = &priv->rss_map; int i; - mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, - MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); - 
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); - mlx4_qp_free(mdev->dev, &rss_map->indir_qp); + if (priv->rx_ring_num > 1) { + mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, + MLX4_QP_STATE_RST, NULL, 0, 0, + rss_map->indir_qp); + mlx4_qp_remove(mdev->dev, rss_map->indir_qp); + mlx4_qp_free(mdev->dev, rss_map->indir_qp); + kfree(rss_map->indir_qp); + rss_map->indir_qp = NULL; + } for (i = 0; i < priv->rx_ring_num; i++) { mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], |
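
The non-XDP branch of mlx4_en_calc_rx_buf() in this patch packs several receive fragments into each 4 KB page and pads every stride so the fragments share the page's leftover space evenly (the "2 1536-bytes frames" case mentioned in the comment). Below is a minimal, self-contained sketch of that sizing arithmetic, assuming a 4096-byte page and 64-byte cache lines; the constants and names are illustrative, not the driver's own definitions.

#include <stdio.h>

#define PAGE_SIZE        4096   /* assumed page size */
#define SMP_CACHE_BYTES  64     /* assumed cache-line size */
#define MAX_RX_FRAGS     4      /* mirrors MLX4_EN_MAX_RX_FRAGS */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Roughly MTU plus Ethernet/VLAN overhead, e.g. ~1536 for MTU 1500 */
	int eff_mtu = 1536;
	int frag_size_max = 2048, buf_size = 0, i = 0;

	while (buf_size < eff_mtu && i < MAX_RX_FRAGS) {
		int frag_size = eff_mtu - buf_size;
		int frag_stride, pad, nb;

		/* All but the last fragment are capped at 2 KB */
		if (i < MAX_RX_FRAGS - 1 && frag_size > frag_size_max)
			frag_size = frag_size_max;

		frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
		nb = PAGE_SIZE / frag_stride;             /* fragments per page */
		pad = (PAGE_SIZE - nb * frag_stride) / nb;
		pad &= ~(SMP_CACHE_BYTES - 1);            /* keep strides aligned */

		printf("frag %d: size %d stride %d (%d per page)\n",
		       i, frag_size, frag_stride + pad, nb);

		buf_size += frag_size;
		i++;
	}
	return 0;
}

For eff_mtu = 1536 this prints a single fragment with a 2048-byte stride, i.e. two frames per 4 KB page, which is the truesize trade-off the driver comment describes.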

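The new mlx4_en_xdp_rx_timestamp() and mlx4_en_xdp_rx_hash() callbacks added above implement the kernel's XDP RX metadata hooks, which attached BPF programs reach through the bpf_xdp_metadata_rx_* kfuncs. The following is a rough consumer-side sketch, assuming a vmlinux.h generated with bpftool and libbpf's bpf_helpers.h; it illustrates the interface and is not code from this patch.

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"            /* struct xdp_md, enum xdp_rss_hash_type */
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					  __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int rx_meta(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type;
	__u64 ts;
	__u32 hash;

	/* Both kfuncs return 0 on success and an error (e.g. -ENODATA) when
	 * the driver has the feature disabled, as in the mlx4 callbacks above
	 * (hwtstamp filter not HWTSTAMP_FILTER_ALL, NETIF_F_RXHASH cleared).
	 */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp: %llu", ts);

	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("rx hash: 0x%x type: %u", hash, rss_type);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

To resolve these kfuncs to the driver-specific implementations rather than the generic stubs, such a program is normally loaded device-bound to the netdev it will attach to.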