path: root/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
author    Toke Høiland-Jørgensen <toke@redhat.com>    2023-01-19 14:15:34 -0800
committer Martin KaFai Lau <martin.lau@kernel.org>    2023-01-23 09:58:23 -0800
commit    384a13ca8a5d4deab94ef8f7652472db26b4e892 (patch)
tree      266cf488a8afd78cd8976053f01a4e87d6c16d48    /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent    94ecc5ca4dbf1f01bae6e32f5cd88c0fc5dc3cc9 (diff)
net/mlx5e: Introduce wrapper for xdp_buff
Preparation for implementing HW metadata kfuncs. No functional change.

Cc: Tariq Toukan <tariqt@nvidia.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20230119221536.3349901-16-sdf@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
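For context, the wrapper introduced here is intentionally thin: it simply embeds the generic xdp_buff so that later patches in the series can attach driver-side state (such as the CQE needed by the HW metadata kfuncs) to the same object without touching every call site again. A minimal sketch of the struct, assuming it is declared alongside the driver's XDP helpers (e.g. in en/xdp.h) as in this series:

	/* Driver wrapper around the generic xdp_buff; later patches in the
	 * series are expected to add fields (for example the completion
	 * queue entry) that the HW metadata kfuncs will need.
	 */
	struct mlx5e_xdp_buff {
		struct xdp_buff xdp;
	};

With this in place, the RX path below fills mxbuf.xdp instead of a bare xdp_buff and passes the wrapper to mlx5e_xdp_handle().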
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--    drivers/net/ethernet/mellanox/mlx5/core/en_rx.c    58
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index c8820ab22169..c6810ca75530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1575,11 +1575,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
-static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
- u32 len, struct xdp_buff *xdp)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, void *va, u16 headroom,
+ u32 len, struct mlx5e_xdp_buff *mxbuf)
{
- xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, true);
+ xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
}
static struct sk_buff *
@@ -1606,16 +1606,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+ mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1637,9 +1637,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct skb_shared_info *sinfo;
+ struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
- struct xdp_buff xdp;
struct sk_buff *skb;
dma_addr_t addr;
u32 truesize;
@@ -1654,8 +1654,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
- sinfo = xdp_get_shared_info_from_buff(&xdp);
+ mlx5e_fill_mxbuf(rq, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1673,13 +1673,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, rq->buff.map_dir);
- if (!xdp_buff_has_frags(&xdp)) {
+ if (!xdp_buff_has_frags(&mxbuf.xdp)) {
/* Init on the first fragment to avoid cold cache access
* when possible.
*/
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(&xdp);
+ xdp_buff_set_frags_flag(&mxbuf.xdp);
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1688,7 +1688,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(au->page))
- xdp_buff_set_frag_pfmemalloc(&xdp);
+ xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
truesize += frag_info->frag_stride;
@@ -1701,7 +1701,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
au = head_wi->au;
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1711,22 +1711,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
- xdp.data - xdp.data_hard_start,
- xdp.data_end - xdp.data,
- xdp.data - xdp.data_meta);
+ skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+ mxbuf.xdp.data_end - mxbuf.xdp.data,
+ mxbuf.xdp.data - mxbuf.xdp.data_meta);
if (unlikely(!skb))
return NULL;
page_ref_inc(au->page);
- if (unlikely(xdp_buff_has_frags(&xdp))) {
+ if (unlikely(xdp_buff_has_frags(&mxbuf.xdp))) {
int i;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
@@ -2007,19 +2007,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ mlx5e_fill_mxbuf(rq, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, au->page, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);