author		Dragos Tatulea <dtatulea@nvidia.com>	2022-12-14 15:44:33 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>	2023-03-28 13:43:58 -0700
commit		4a5c5e25008f374e525178f5ba2581cfa3303a0c (patch)
tree		e927eda609de20f1681695e946832dae9da1cc7d
parent		08c9b61b071ca780ac2740b9de755d2ebac2a2e5 (diff)
net/mlx5e: RX, Enable dma map and sync from page_pool allocator
Remove driver dma mapping and unmapping of pages. Let the page_pool
api do it.

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
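For context: with PP_FLAG_DMA_MAP the pool DMA-maps each page as it enters the pool, and with PP_FLAG_DMA_SYNC_DEV it syncs pages for device before they are reused, which is what lets the driver code below drop its own dma_map_page()/dma_unmap_page_attrs() calls. A minimal setup sketch, assuming the page_pool API of this kernel series (the PP_* flags, page_pool_create() and page_pool_dev_alloc_pages() are the real API; dev and the pool size are illustrative):

#include <net/page_pool.h>

	struct page_pool_params pp_params = { 0 };
	struct page_pool *pool;
	struct page *page;

	pp_params.order		= 0;			/* single pages */
	pp_params.flags		= PP_FLAG_DMA_MAP |	/* pool maps at alloc */
				  PP_FLAG_DMA_SYNC_DEV;	/* pool syncs before reuse */
	pp_params.pool_size	= 1024;			/* illustrative size */
	pp_params.dev		= dev;			/* struct device doing the DMA */
	pp_params.dma_dir	= DMA_FROM_DEVICE;
	pp_params.max_len	= PAGE_SIZE;		/* span to sync for device */

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_dev_alloc_pages(pool);		/* returned already mapped */
	if (unlikely(!page))
		return -ENOMEM;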
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h	|  1 -
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c	|  2 --
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	|  6 ++++--
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rx.c		| 22 ----------------------
4 files changed, 4 insertions, 27 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index dab00a2c2eb7..04419f56ac85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -65,7 +65,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 
 /* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index c5dae48b7932..5e6ef602c748 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -209,8 +209,6 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 			goto xdp_abort;
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
 		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
-		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-			mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
 		rq->stats->xdp_redirect++;
 		return true;
 	default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b0322a20b71b..2a73680021c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -733,7 +733,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 			  struct mlx5e_rq_param *rqp,
 			  int node, struct mlx5e_rq *rq)
 {
-	struct page_pool_params pp_params = { 0 };
 	struct mlx5_core_dev *mdev = rq->mdev;
 	void *rqc = rqp->rqc;
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -829,12 +828,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
 	} else {
 		/* Create a page_pool and register it with rxq */
+		struct page_pool_params pp_params = { 0 };
+
 		pp_params.order		= 0;
-		pp_params.flags		= 0; /* No-internal DMA mapping in page_pool */
+		pp_params.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size	= pool_size;
 		pp_params.nid		= node;
 		pp_params.dev		= rq->pdev;
 		pp_params.dma_dir	= rq->buff.map_dir;
+		pp_params.max_len	= PAGE_SIZE;
 
 		/* page_pool can be used even when there is no rq->xdp_prog,
 		 * given page_pool does not handle DMA mapping there is no
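The new max_len field above is required by PP_FLAG_DMA_SYNC_DEV: it bounds how many bytes the pool syncs for device when a page is recycled. Roughly what the allocator does internally on recycle, as a simplified sketch (modeled on page_pool's internal sync helper; not code from this patch):

	/* Before handing a recycled page back for RX, the pool syncs the
	 * device-writable span; max_len comes from pp_params.max_len.
	 */
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_single_range_for_device(pool->p.dev, dma_addr, pool->p.offset,
					 pool->p.max_len, pool->p.dma_dir);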
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 192f12a7d9a9..01c789b89cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -273,40 +273,18 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct page **pagep)
 {
-	dma_addr_t addr;
-
 	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
 	if (unlikely(!*pagep))
 		return -ENOMEM;
 
-	/* Non-XSK always uses PAGE_SIZE. */
-	addr = dma_map_page(rq->pdev, *pagep, 0, PAGE_SIZE, rq->buff.map_dir);
-	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
-		page_pool_recycle_direct(rq->page_pool, *pagep);
-		*pagep = NULL;
-		return -ENOMEM;
-	}
-	page_pool_set_dma_addr(*pagep, addr);
-
 	return 0;
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
-{
-	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
-
-	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
-			     DMA_ATTR_SKIP_CPU_SYNC);
-	page_pool_set_dma_addr(page, 0);
-}
-
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_recycle_direct(rq->page_pool, page);
 	} else {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_release_page(rq->page_pool, page);
 		put_page(page);
 	}
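With the pool owning the mapping, the driver pattern replacing the deleted code reduces to allocate, read back the pool-maintained address, and recycle or release with no explicit unmap (page_pool_release_page() drops the mapping inside the pool). A hedged usage sketch reusing the rq and page names from the hunk above; the WQE comment is illustrative:

	struct page *page;
	dma_addr_t addr;

	if (mlx5e_page_alloc_pool(rq, &page))
		return -ENOMEM;

	addr = page_pool_get_dma_addr(page);	/* set by the pool at alloc time */
	/* ... program addr into the RX WQE data segment ... */

	/* Recycle keeps the page mapped; release lets the pool unmap it. */
	mlx5e_page_release_dynamic(rq, page, true);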