author		Dragos Tatulea <dtatulea@nvidia.com>	2022-12-14 15:44:33 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>	2023-03-28 13:43:58 -0700
commit		4a5c5e25008f374e525178f5ba2581cfa3303a0c (patch)
tree		e927eda609de20f1681695e946832dae9da1cc7d /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent		08c9b61b071ca780ac2740b9de755d2ebac2a2e5 (diff)
net/mlx5e: RX, Enable dma map and sync from page_pool allocator
Remove driver DMA mapping and unmapping of pages. Let the page_pool API
do it.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
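For context, the page_pool allocator takes over DMA mapping and device syncing
when the pool is created with the right flags. Below is a minimal sketch of
such a setup, assuming the page_pool API as of this kernel release; it is an
illustration, not the actual mlx5e pool creation code (which lives elsewhere
in the driver), and the function name create_rx_page_pool and the pool_size
value are placeholders:

	#include <net/page_pool.h>

	static struct page_pool *create_rx_page_pool(struct device *dev, int nid)
	{
		struct page_pool_params pp_params = {
			/* The pool maps each page at allocation time and
			 * syncs it for the device before reuse, so the
			 * driver no longer calls dma_map_page() or
			 * dma_unmap_page() itself.
			 */
			.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order     = 0,
			.pool_size = 1024,	/* assumption: sized to the RX ring */
			.nid       = nid,
			.dev       = dev,
			.dma_dir   = DMA_FROM_DEVICE,
			.max_len   = PAGE_SIZE,	/* sync the whole page for RX */
			.offset    = 0,
		};

		return page_pool_create(&pp_params); /* ERR_PTR() on failure */
	}

With PP_FLAG_DMA_MAP set, the pool also records each page's DMA address, which
is why the deleted code below could rely on page_pool_get_dma_addr() and
page_pool_set_dma_addr() as the storage for that address.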
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 22 ----------------------
1 file changed, 0 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 192f12a7d9a9..01c789b89cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -273,40 +273,18 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct page **pagep)
 {
-	dma_addr_t addr;
-
 	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
 	if (unlikely(!*pagep))
 		return -ENOMEM;
 
-	/* Non-XSK always uses PAGE_SIZE. */
-	addr = dma_map_page(rq->pdev, *pagep, 0, PAGE_SIZE, rq->buff.map_dir);
-	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
-		page_pool_recycle_direct(rq->page_pool, *pagep);
-		*pagep = NULL;
-		return -ENOMEM;
-	}
-	page_pool_set_dma_addr(*pagep, addr);
-
 	return 0;
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
-{
-	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
-
-	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
-			     DMA_ATTR_SKIP_CPU_SYNC);
-	page_pool_set_dma_addr(page, 0);
-}
-
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_recycle_direct(rq->page_pool, page);
 	} else {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_release_page(rq->page_pool, page);
 		put_page(page);
 	}
 }
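After this patch, a caller of mlx5e_page_alloc_pool() reads the DMA address
back from the pool instead of mapping the page itself. A hedged usage sketch,
assuming a caller in the RX path (the WQE-posting step is elided and only
indicated by a comment):

	struct page *page;
	dma_addr_t addr;

	if (mlx5e_page_alloc_pool(rq, &page))
		return -ENOMEM;
	/* The address was mapped and stored by the page_pool, not the driver. */
	addr = page_pool_get_dma_addr(page);
	/* ... program 'addr' into the RX WQE ... */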