author		Dragos Tatulea <dtatulea@nvidia.com>	2023-01-27 16:58:52 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>	2023-03-28 13:43:57 -0700
commit		8fb1814f58f691373c692df75b761668069e197a (patch)
tree		ac3b4e88d7a5dd3c19f665a6567cf3a2fe1d6fee /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent		09df037017799b723d03a97c3f0de069bae9f625 (diff)
net/mlx5e: RX, Remove alloc unit layout constraint for legacy rq
The mlx5e_alloc_unit union is conveniently used to store arrays of
pointers to struct page or struct xdp_buff (for xsk). The union is
currently expected to have the size of a pointer for xsk batch
allocations to work. This is convenient for the current state of the
code but makes it impossible to add a structure of a different size
to the alloc unit.
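
To see where the size constraint comes from, here is a minimal sketch of the pre-patch layout (fill_xsk_batch is a hypothetical helper, not a driver function): xsk batch allocation fills a plain array of struct xdp_buff pointers, so passing the union array through a cast is only sound while the union is exactly one pointer wide.

#include <linux/build_bug.h>
#include <net/xdp_sock_drv.h>

union mlx5e_alloc_unit {
	struct page *page;
	struct xdp_buff *xsk;
};

static u32 fill_xsk_batch(struct xsk_buff_pool *pool,
			  union mlx5e_alloc_unit *au, u32 count)
{
	/* The cast reinterprets the union array as an array of
	 * struct xdp_buff *; adding a larger member to the union
	 * would silently corrupt the batch.
	 */
	BUILD_BUG_ON(sizeof(union mlx5e_alloc_unit) !=
		     sizeof(struct xdp_buff *));
	return xsk_buff_alloc_batch(pool, (struct xdp_buff **)au, count);
}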
A further patch in the series will add the mlx5e_frag_page struct for
which the described size constraint will no longer hold.
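
For orientation, a sketch of the struct the series is heading toward; the exact definition belongs to the follow-up patch, not this one:

/* Two fields, so clearly wider than a single pointer, which breaks
 * the cast shown above.
 */
struct mlx5e_frag_page {
	struct page *page;
	u16 frags;
};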
This change removes the usage of mlx5e_alloc_unit union for legacy rq:
- A union of arrays (mlx5e_alloc_units) is introduced to replace the
  array of unions, allowing structures of different sizes (see the
  sketch below).
- Each fragment holds a pointer to a unit in the mlx5e_alloc_units array.
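
A minimal sketch of the new layout, assuming the anonymous-union form implied by the wi->pagep/wi->xskp accesses in the diff below; the actual struct changes land in en.h, outside this file's diffstat:

#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY */

/* Union of arrays: members may have different element sizes, because
 * only one array is ever active for a given rq.
 */
union mlx5e_alloc_units {
	DECLARE_FLEX_ARRAY(struct page *, pages);		/* legacy rq */
	DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs);	/* xsk rq */
};

struct mlx5e_wqe_frag_info {
	union {
		struct page **pagep;	/* entry in alloc_units->pages */
		struct xdp_buff **xskp;	/* entry in alloc_units->xsk_buffs */
	};
	u32 offset;
	bool last_in_page;
};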
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 36300118b6e4..74e7e00cf494 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -371,12 +371,12 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 	int err = 0;
 
 	if (!frag->offset)
-		/* On first frag (offset == 0), replenish page (alloc_unit actually).
-		 * Other frags that point to the same alloc_unit (with a different
+		/* On first frag (offset == 0), replenish page.
+		 * Other frags that point to the same page (with a different
 		 * offset) should just use the new one without replenishing again
 		 * by themselves.
 		 */
-		err = mlx5e_page_alloc_pool(rq, &frag->au->page);
+		err = mlx5e_page_alloc_pool(rq, frag->pagep);
 
 	return err;
 }
@@ -386,7 +386,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 				     bool recycle)
 {
 	if (frag->last_in_page)
-		mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
+		mlx5e_page_release_dynamic(rq, *frag->pagep, recycle);
 }
 
 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -410,7 +410,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
 			goto free_frags;
 
 		headroom = i == 0 ? rq->buff.headroom : 0;
-		addr = page_pool_get_dma_addr(frag->au->page);
+		addr = page_pool_get_dma_addr(*frag->pagep);
 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
 	}
 
@@ -434,7 +434,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 		 * put into the Reuse Ring, because there is no way to return
 		 * the page to the userspace when the interface goes down.
 		 */
-		xsk_buff_free(wi->au->xsk);
+		xsk_buff_free(*wi->xskp);
 		return;
 	}
 
@@ -1587,8 +1587,8 @@ static struct sk_buff *
 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
 {
-	union mlx5e_alloc_unit *au = wi->au;
 	u16 rx_headroom = rq->buff.headroom;
+	struct page *page = *wi->pagep;
 	struct bpf_prog *prog;
 	struct sk_buff *skb;
 	u32 metasize = 0;
@@ -1596,11 +1596,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 	dma_addr_t addr;
 	u32 frag_size;
 
-	va = page_address(au->page) + wi->offset;
+	va = page_address(page) + wi->offset;
 	data = va + rx_headroom;
 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
 
-	addr = page_pool_get_dma_addr(au->page);
+	addr = page_pool_get_dma_addr(page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
 				      frag_size, rq->buff.map_dir);
 	net_prefetch(data);
@@ -1624,7 +1624,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 		return NULL;
 
 	/* queue up for recycling/reuse */
-	page_ref_inc(au->page);
+	page_ref_inc(page);
 
 	return skb;
 }
@@ -1635,8 +1635,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 {
 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
 	struct mlx5e_wqe_frag_info *head_wi = wi;
-	union mlx5e_alloc_unit *au = wi->au;
 	u16 rx_headroom = rq->buff.headroom;
+	struct page *page = *wi->pagep;
 	struct skb_shared_info *sinfo;
 	struct mlx5e_xdp_buff mxbuf;
 	u32 frag_consumed_bytes;
@@ -1646,10 +1646,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	u32 truesize;
 	void *va;
 
-	va = page_address(au->page) + wi->offset;
+	va = page_address(page) + wi->offset;
 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-	addr = page_pool_get_dma_addr(au->page);
+	addr = page_pool_get_dma_addr(page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
 				      rq->buff.frame0_sz, rq->buff.map_dir);
 	net_prefetchw(va); /* xdp_frame data area */
@@ -1666,11 +1666,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	while (cqe_bcnt) {
 		skb_frag_t *frag;
 
-		au = wi->au;
+		page = *wi->pagep;
 
 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
 
-		addr = page_pool_get_dma_addr(au->page);
+		addr = page_pool_get_dma_addr(page);
 		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
 					frag_consumed_bytes, rq->buff.map_dir);
 
@@ -1684,11 +1684,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 		}
 
 		frag = &sinfo->frags[sinfo->nr_frags++];
-		__skb_frag_set_page(frag, au->page);
+		__skb_frag_set_page(frag, page);
 		skb_frag_off_set(frag, wi->offset);
 		skb_frag_size_set(frag, frag_consumed_bytes);
 
-		if (page_is_pfmemalloc(au->page))
+		if (page_is_pfmemalloc(page))
 			xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
 
 		sinfo->xdp_frags_size += frag_consumed_bytes;
@@ -1717,7 +1717,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	if (unlikely(!skb))
 		return NULL;
 
-	page_ref_inc(head_wi->au->page);
+	page_ref_inc(*head_wi->pagep);
 
 	if (xdp_buff_has_frags(&mxbuf.xdp)) {
 		int i;
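
The diff above shows only the consumers of frag->pagep. As a hypothetical sketch of the producer side (wire_frags and the exact field paths are illustrative; the real partition logic lives in en_main.c and also handles frags that share one page):

static void wire_frags(struct mlx5e_rq *rq)
{
	int i;

	/* Point every fragment at its slot in the per-rq units array;
	 * consumers then dereference frag->pagep without knowing (or
	 * caring) how wide an allocation unit is.
	 */
	for (i = 0; i < rq->wqe.info.num_frags; i++) {
		struct mlx5e_wqe_frag_info *frag = &rq->wqe.frags[i];

		frag->pagep = &rq->wqe.alloc_units->pages[i];
	}
}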