Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c  29
1 file changed, 15 insertions(+), 14 deletions(-)
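
The patch below stops dereferencing the wi->alloc_units[i].xsk union member on every access and instead takes one typed pointer, xsk_buffs, from wi->alloc_units.xsk_buffs up front and indexes it in the loops; that is presumably also why the BUILD_BUG_ON pinning the union element size to the xsk pointer size is dropped. A minimal standalone sketch of the before/after access pattern follows; the struct layout and names are simplified stand-ins for illustration, not the driver's real definitions:

/* Simplified illustration only: a per-element union access vs. a typed
 * view over the same storage. All struct names here are invented.
 */
#include <stdio.h>

struct xdp_buff_stub { int id; };             /* stand-in for struct xdp_buff */

union sketch_alloc_unit {
	struct xdp_buff_stub *xsk;
	void *frag_page;
};

struct sketch_wqe_info {
	union {
		union sketch_alloc_unit units[4];
		struct xdp_buff_stub *xsk_buffs[4];   /* typed view used after the patch */
	} alloc_units;
};

int main(void)
{
	struct sketch_wqe_info wi = { 0 };
	struct xdp_buff_stub buf = { .id = 7 };
	struct xdp_buff_stub **xsk_buffs;

	/* Before: union member access on every element. */
	wi.alloc_units.units[0].xsk = &buf;
	printf("old view: %d\n", wi.alloc_units.units[0].xsk->id);

	/* After: grab the typed array once, then index it directly. */
	xsk_buffs = wi.alloc_units.xsk_buffs;
	printf("new view: %d\n", xsk_buffs[0]->id);
	return 0;
}
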
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 8a5ae80e6142..b2c1af07c317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -22,6 +22,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5e_icosq *icosq = rq->icosq;
struct mlx5_wq_cyc *wq = &icosq->wq;
struct mlx5e_umr_wqe *umr_wqe;
+ struct xdp_buff **xsk_buffs;
int batch, i;
u32 offset; /* 17-bit value with MTT. */
u16 pi;
@@ -29,9 +30,9 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
goto err;
- BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff);
- batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
+ xsk_buffs = (struct xdp_buff **)wi->alloc_units.xsk_buffs;
+ batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,
rq->mpwqe.pages_per_wqe);
/* If batch < pages_per_wqe, either:
@@ -41,8 +42,8 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
* the first error, which will mean there are no more valid descriptors.
*/
for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
- wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);
- if (unlikely(!wi->alloc_units[batch].xsk))
+ xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!xsk_buffs[batch]))
goto err_reuse_batch;
}
@@ -52,8 +53,8 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
for (i = 0; i < batch; i++) {
- struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
- dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(xsk_buffs[i]);
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(xsk_buffs[i]);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
@@ -62,8 +63,8 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
}
} else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
for (i = 0; i < batch; i++) {
- struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
- dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(xsk_buffs[i]);
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(xsk_buffs[i]);
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = rq->mkey_be,
@@ -75,8 +76,8 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
for (i = 0; i < batch; i++) {
- struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
- dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(xsk_buffs[i]);
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(xsk_buffs[i]);
umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
.key = rq->mkey_be,
@@ -102,8 +103,8 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
__be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
for (i = 0; i < batch; i++) {
- struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
- dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(xsk_buffs[i]);
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(xsk_buffs[i]);
umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
.key = rq->mkey_be,
@@ -149,7 +150,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_reuse_batch:
while (--batch >= 0)
- xsk_buff_free(wi->alloc_units[batch].xsk);
+ xsk_buff_free(xsk_buffs[batch]);
err:
rq->stats->buff_alloc_err++;
@@ -248,7 +249,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[page_idx].xsk);
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units.xsk_buffs[page_idx]);
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
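
For context on the allocation path the earlier hunks rewrite: the driver first asks the XSK pool for a whole batch of rx buffers, then tops the array up one buffer at a time if the batch came back short, and returns everything it already took to the pool if a single allocation fails. A condensed sketch of that pattern using the same xsk_buff_* helpers appears below; the wrapper function and its error handling are illustrative, not the driver's code:

/* Illustrative wrapper, not from the driver: batch-allocate 'count' XSK
 * rx buffers into bufs[], fall back to single allocations for the
 * remainder, and unwind on failure.  xsk_buff_alloc_batch(),
 * xsk_buff_alloc() and xsk_buff_free() come from <net/xdp_sock_drv.h>.
 */
#include <net/xdp_sock_drv.h>

static int sketch_alloc_xsk_rx_batch(struct xsk_buff_pool *pool,
				     struct xdp_buff **bufs, int count)
{
	int batch = xsk_buff_alloc_batch(pool, bufs, count);

	for (; batch < count; batch++) {
		bufs[batch] = xsk_buff_alloc(pool);
		if (unlikely(!bufs[batch]))
			goto err_free_batch;
	}
	return 0;

err_free_batch:
	while (--batch >= 0)
		xsk_buff_free(bufs[batch]);
	return -ENOMEM;
}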