author    Dragos Tatulea <dtatulea@nvidia.com>  2023-01-30 12:02:45 +0200
committer Saeed Mahameed <saeedm@nvidia.com>    2023-03-28 13:43:57 -0700
commit    d39092caaedf89c67170c035cad87c79988d1527 (patch)
tree      b62aa01cd1ca86c488cf09013e804ea24f811fd4 /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent    8fb1814f58f691373c692df75b761668069e197a (diff)
net/mlx5e: RX, Remove alloc unit layout constraint for striding rq
This change removes the usage of the mlx5e_alloc_unit union for striding rq. The change is more straightforward than for legacy rq, as the alloc units union is already in place.

This patch only moves things around: instead of an array of unions, make it a union of arrays. This has the effect that each mlx5e_mpw_info will allocate the largest possible size of the array member. For xsk this means that the array of xdp_buff pointers for the wqe will still be contiguous, but there will be some extra unused bytes at the end of the array.

A further patch in the series will add the mlx5e_frag_page struct, for which the described size constraint will no longer hold.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
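To make the layout change concrete, here is a minimal sketch in plain C (not the kernel definitions: the *_sketch struct names and the pages-per-WQE value are hypothetical; only the alloc_units member names follow the diff below):

struct page;      /* kernel types, opaque in this sketch */
struct xdp_buff;

#define SKETCH_PAGES_PER_WQE 64   /* illustrative value only */

/* Before: an array of unions. Every element is padded to the size of
 * the largest union member, so adding a bigger member (such as the
 * upcoming mlx5e_frag_page) would grow the stride for all unit types.
 */
struct mpw_info_before_sketch {
	union {
		struct page *page;
		struct xdp_buff *xsk;
	} alloc_units[SKETCH_PAGES_PER_WQE];
};

/* After: a union of arrays. The union is sized for its largest array
 * member, each array keeps its natural element stride (so the xdp_buff
 * pointers stay contiguous), and any slack sits at the end of the
 * smaller arrays.
 */
struct mpw_info_after_sketch {
	union {
		struct page *pages[SKETCH_PAGES_PER_WQE];
		struct xdp_buff *xsk_buffs[SKETCH_PAGES_PER_WQE];
	} alloc_units;
};

With the union of arrays, accessors switch from wi->alloc_units[i].page to wi->alloc_units.pages[i], as seen in the hunks below.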
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  61
1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 74e7e00cf494..d02f2f2af4ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -469,16 +469,16 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
+ struct page *page, u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(au->page);
+ dma_addr_t addr = page_pool_get_dma_addr(page);
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- page_ref_inc(au->page);
+ page_ref_inc(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- au->page, frag_offset, len, truesize);
+ page, frag_offset, len, truesize);
}
static inline void
@@ -498,7 +498,6 @@ mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
- union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
bool no_xdp_xmit;
int i;
@@ -509,17 +508,21 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle
no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
if (rq->xsk_pool) {
+ struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
+
/* The `recycle` parameter is ignored, and the page is always
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
*/
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- xsk_buff_free(alloc_units[i].xsk);
+ xsk_buff_free(xsk_buffs[i]);
} else {
+ struct page **pages = wi->alloc_units.pages;
+
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
+ mlx5e_page_release_dynamic(rq, pages[i], recycle);
}
}
@@ -694,7 +697,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
- union mlx5e_alloc_unit *au = &wi->alloc_units[0];
+ struct page **pagep = &wi->alloc_units.pages[0];
struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
@@ -713,13 +716,13 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
- for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, pagep++) {
dma_addr_t addr;
- err = mlx5e_page_alloc_pool(rq, &au->page);
+ err = mlx5e_page_alloc_pool(rq, pagep);
if (unlikely(err))
goto err_unmap;
- addr = page_pool_get_dma_addr(au->page);
+ addr = page_pool_get_dma_addr(*pagep);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
@@ -760,8 +763,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
- au--;
- mlx5e_page_release_dynamic(rq, au->page, true);
+ pagep--;
+ mlx5e_page_release_dynamic(rq, *pagep, true);
}
err:
@@ -1914,7 +1917,7 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
static void
mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
- union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
+ struct page **pagep, u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
@@ -1928,12 +1931,12 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
else
truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_frag(rq, skb, au, data_offset,
+ mlx5e_add_skb_frag(rq, skb, *pagep, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
- au++;
+ pagep++;
}
}
@@ -1942,11 +1945,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
u32 page_idx)
{
- union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
+ struct page **pagep = &wi->alloc_units.pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
- union mlx5e_alloc_unit *head_au = au;
+ struct page *head_page = *pagep;
struct sk_buff *skb;
dma_addr_t addr;
@@ -1961,14 +1964,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
if (unlikely(frag_offset >= PAGE_SIZE)) {
- au++;
+ pagep++;
frag_offset -= PAGE_SIZE;
}
- mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
+ mlx5e_fill_skb_data(skb, rq, pagep, byte_cnt, frag_offset);
/* copy header */
- addr = page_pool_get_dma_addr(head_au->page);
- mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
+ addr = page_pool_get_dma_addr(head_page);
+ mlx5e_copy_skb_header(rq, skb, head_page, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -1982,7 +1985,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
u32 page_idx)
{
- union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
+ struct page *page = wi->alloc_units.pages[page_idx];
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1997,11 +2000,11 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(au->page) + head_offset;
+ va = page_address(page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(au->page);
+ addr = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
@@ -2028,7 +2031,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(au->page);
+ page_ref_inc(page);
return skb;
}
@@ -2146,7 +2149,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
struct mlx5e_rx_wqe_ll *wqe;
- union mlx5e_alloc_unit *au;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;
@@ -2196,8 +2198,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- au = &wi->alloc_units[page_idx];
- mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
+ struct page **pagep = &wi->alloc_units.pages[page_idx];
+
+ mlx5e_fill_skb_data(*skb, rq, pagep, data_bcnt, data_offset);
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);