path: root/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
author     Tariq Toukan <tariqt@nvidia.com>    2022-10-31 14:18:22 +0200
committer  Saeed Mahameed <saeedm@nvidia.com>  2022-11-29 21:09:42 -0800
commit     b146658f2ed90768b769222ca418617274242b32 (patch)
tree       9c7b2b2f5ca44ef7111fc82709ff654c6377149a /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent     12eb0f84a601cec8920b56d7ffd4182a6bfd6521 (diff)
net/mlx5e: Add padding when needed in UMR WQEs
Per the device spec, the MTT/KLM list in a UMR WQE must be aligned to 64B. Per our SW design, the list needs this extra alignment only when it is too small, for example on PPC with a 64KB PAGE_SIZE, where only 4 pages are needed to cover an MPWQE of size 256KB.

The padding, when needed, is already taken into account when calculating the UMR WQE fields (ds_cnt and xlt_octowords), however no padding entries are actually provided; instead, garbage is passed.

There is no real harm though: these parts act as gaps between the RX MPWQEs, are not used by any of them, and hence, in practice, the device does not try to write any incoming packet to them. Still, prefer providing clean padding that marks the end of the list, and do not map garbage into the RQ memory region.

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
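As an aside (not part of the patch), the padding arithmetic described above can be sanity-checked with a minimal standalone sketch. Here ALIGN and MTT_NUM_ENTRIES_ALIGNMENT are local stand-ins for the kernel's ALIGN() and MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT; the 8-entry value assumes the 64B alignment divided by an 8-byte MTT entry, and the pages_per_wqe value reproduces the PPC example from the commit message.

#include <stdio.h>

/* Stand-ins for the kernel's ALIGN() and MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT
 * (assumed here to be 64B / 8B per MTT entry = 8 entries).
 */
#define ALIGN(x, a)               (((x) + (a) - 1) & ~((a) - 1))
#define MTT_NUM_ENTRIES_ALIGNMENT 8u

int main(void)
{
        /* PPC example: 256KB MPWQE covered by 64KB pages -> 4 MTT entries */
        unsigned int pages_per_wqe = 4;
        unsigned int pad = 0;

        /* Same condition as the patch: pad only when the list is not
         * already a multiple of the alignment.
         */
        if (pages_per_wqe & (MTT_NUM_ENTRIES_ALIGNMENT - 1))
                pad = ALIGN(pages_per_wqe, MTT_NUM_ENTRIES_ALIGNMENT) - pages_per_wqe;

        /* Prints: pages_per_wqe=4 pad=4 -> 4 trailing MTT slots get zeroed */
        printf("pages_per_wqe=%u pad=%u\n", pages_per_wqe, pad);
        return 0;
}

With 4KB pages the MTT count per MPWQE is already a multiple of 8, the condition is false, and no padding is written, which is why the memset in the patch only runs on configurations like the PPC one above.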
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b1ea0b995d9c..8d71736116e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -727,6 +727,17 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 		};
 	}
 
+	/* Pad if needed, in case the value set to ucseg->xlt_octowords
+	 * in mlx5e_build_umr_wqe() needed alignment.
+	 */
+	if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
+		int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
+			rq->mpwqe.pages_per_wqe;
+
+		memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
+		       sizeof(*umr_wqe->inline_mtts) * pad);
+	}
+
 	bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
 	wi->consumed_strides = 0;