author    | Maxim Mikityanskiy <maximmi@nvidia.com>  | 2022-01-27 17:01:05 +0200
committer | Saeed Mahameed <saeedm@nvidia.com>       | 2022-03-18 13:51:12 -0700
commit    | ea5d49bdae8b4c9dcdac574eef96b1bd47000c2a
tree      | 3ce3ffde92a4ce8296f1f46497cd88e5035d8334 /drivers/net/ethernet/mellanox/mlx5
parent    | d51f4a4cca6f629927b1871af0c6f5ce7a9022e9
net/mlx5e: Add XDP multi buffer support to the non-linear legacy RQ
This commit adds XDP multi buffer support to the RX path in the
non-linear legacy RQ mode. mlx5e_xdp_handle is called from
mlx5e_skb_from_cqe_nonlinear.
The XDP_TX action is not yet supported for fragmented XDP frames and is
blocked: mlx5e_xmit_xdp_buff drops any frame that carries fragments.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
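
For a BPF program, multi buffer support means a frame's payload may extend past the first buffer: the direct packet pointers (ctx->data .. ctx->data_end) cover only the linear part, while the total length comes from the bpf_xdp_get_buff_len() helper. Below is a minimal sketch of a frags-aware program, assuming libbpf's "xdp.frags" section name (which sets BPF_F_XDP_HAS_FRAGS at load time) and the bpf_xdp_get_buff_len() helper from the same 5.18-era multi-buffer series; the drop policy is purely illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int drop_multibuf(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	__u64 linear   = data_end - data;            /* first buffer only */
	__u64 total    = bpf_xdp_get_buff_len(ctx);  /* linear + all tail frags */

	/* Illustrative policy: drop frames that did not fit in one buffer.
	 * Tail fragments are not covered by the direct packet pointers;
	 * reading them would require bpf_xdp_load_bytes(). */
	if (total > linear)
		return XDP_DROP;
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";

The section name is only the libbpf convention for setting the flag; the same program runs unchanged on single-buffer frames, where total == linear.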
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c |  5 +++++
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  | 13 +++++++++++++
2 files changed, 18 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 6aa77f0e094e..3a837030e96e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -68,6 +68,11 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	if (unlikely(!xdpf))
 		return false;
 
+	if (unlikely(xdp_frame_has_frags(xdpf))) {
+		xdp_return_frame(xdpf);
+		return false;
+	}
+
 	xdptxd.data = xdpf->data;
 	xdptxd.len = xdpf->len;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dd8ff62e1693..e9ad5b3a30ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1572,6 +1572,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	struct mlx5e_dma_info *di = wi->di;
 	struct skb_shared_info *sinfo;
 	u32 frag_consumed_bytes;
+	struct bpf_prog *prog;
 	struct xdp_buff xdp;
 	struct sk_buff *skb;
 	u32 truesize;
@@ -1582,6 +1583,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
 				      rq->buff.frame0_sz, DMA_FROM_DEVICE);
+	net_prefetchw(va); /* xdp_frame data area */
 	net_prefetch(va + rx_headroom);
 
 	mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
@@ -1629,6 +1631,17 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
 	di = head_wi->di;
 
+	prog = rcu_dereference(rq->xdp_prog);
+	if (prog && mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+			int i;
+
+			for (i = wi - head_wi; i < rq->wqe.info.num_frags; i++)
+				mlx5e_put_rx_frag(rq, &head_wi[i], true);
+		}
+		return NULL; /* page/packet was consumed by XDP */
+	}
+
 	skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
 				     xdp.data - xdp.data_hard_start,
 				     xdp.data_end - xdp.data,
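
The check added to mlx5e_xmit_xdp_buff() above is one instance of a general pattern: a TX path that can post only a single linear buffer must detect fragmented frames and return every page they hold. A hedged sketch of that pattern, using the xdp_frame_has_frags() and xdp_get_shared_info_from_frame() helpers from the same kernel series; my_drv_xmit_xdp_frame() is a hypothetical driver hook, not an mlx5 function:

#include <linux/printk.h>
#include <linux/skbuff.h>
#include <net/xdp.h>

/* Hypothetical sketch: reject multi-buffer frames on a TX queue that
 * only takes one linear buffer, mirroring the gating added above. */
static bool my_drv_xmit_xdp_frame(struct xdp_frame *xdpf)
{
	if (unlikely(xdp_frame_has_frags(xdpf))) {
		/* Tail fragments live in the frame's skb_shared_info. */
		struct skb_shared_info *sinfo =
			xdp_get_shared_info_from_frame(xdpf);

		pr_debug("dropping xdp_frame with %u tail frags\n",
			 sinfo->nr_frags);
		xdp_return_frame(xdpf); /* frees head page and all frags */
		return false;
	}

	/* Single-buffer frame: xdpf->data / xdpf->len describe the whole
	 * packet and can be posted as one descriptor (driver specific,
	 * elided here). */
	return true;
}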