author     Dragos Tatulea <dtatulea@nvidia.com>      2022-12-13 14:37:07 +0200
committer  Saeed Mahameed <saeedm@nvidia.com>        2023-03-28 13:43:57 -0700
commit     08c9b61b071ca780ac2740b9de755d2ebac2a2e5 (patch)
tree       d669b2042b2643a649bf2aa3d76afb5c06cf3816 /drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
parent     ca6ef9f031946134030e1b583e172c2e47c9a992 (diff)
net/mlx5e: RX, Remove internal page_cache
This patch removes the internal rx page_cache and uses the generic
page_pool api only. It used to be that the page_pool couldn't handle all
the mlx5 driver use cases, but with the introduction of skb recycling and
page fragmentation in the page_pool, a full switch can now be made. Some
benefits of this transition:
* Better page recycling in cases where the page_cache was suffering
from head-of-queue blocking. The page_pool doesn't have this issue.
* DMA mapping/unmapping can be managed by the page_pool.
* mlx5e_rq size reduced by more than 50% due to the page_cache array
being deleted.
This patch only removes the page_cache. Downstream patches will enable
the required page_pool features and will add further fine-tuning.
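
For illustration only (not part of this patch): a minimal sketch of how a
page_pool can be created so that it owns DMA mapping (PP_FLAG_DMA_MAP) and
pre-reuse device sync (PP_FLAG_DMA_SYNC_DEV), which is the direction the
downstream patches take. The helper name and parameter values below are
hypothetical; only the generic page_pool API calls are real.

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Hypothetical helper: a pool configured this way maps pages for DMA on
 * allocation and syncs them for the device before reuse, removing the
 * need for a driver-private recycle cache.
 */
static struct page_pool *sketch_rq_pool_create(struct device *dev, int node,
                                               u32 pool_size,
                                               enum dma_data_direction dir)
{
        struct page_pool_params pp_params = {
                .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order          = 0,
                .pool_size      = pool_size,
                .nid            = node,
                .dev            = dev,
                .dma_dir        = dir,
                .max_len        = PAGE_SIZE,    /* sync the full page */
                .offset         = 0,
        };

        return page_pool_create(&pp_params);    /* ERR_PTR() on failure */
}

With such a pool, allocation reduces to page_pool_dev_alloc_pages() plus
page_pool_get_dma_addr(), and release from NAPI context to
page_pool_recycle_direct() -- the shape the simplified functions in the
diff below take.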
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rx.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  53
1 file changed, 0 insertions(+), 53 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7057db954f6f..192f12a7d9a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -271,60 +271,10 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
 }
 
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
-{
-	struct mlx5e_page_cache *cache = &rq->page_cache;
-	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
-	struct mlx5e_rq_stats *stats = rq->stats;
-
-	if (tail_next == cache->head) {
-		stats->cache_full++;
-		return false;
-	}
-
-	if (!dev_page_is_reusable(page)) {
-		stats->cache_waive++;
-		return false;
-	}
-
-	cache->page_cache[cache->tail] = page;
-	cache->tail = tail_next;
-	return true;
-}
-
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, struct page **pagep)
-{
-	struct mlx5e_page_cache *cache = &rq->page_cache;
-	struct mlx5e_rq_stats *stats = rq->stats;
-	dma_addr_t addr;
-
-	if (unlikely(cache->head == cache->tail)) {
-		stats->cache_empty++;
-		return false;
-	}
-
-	if (page_ref_count(cache->page_cache[cache->head]) != 1) {
-		stats->cache_busy++;
-		return false;
-	}
-
-	*pagep = cache->page_cache[cache->head];
-	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
-	stats->cache_reuse++;
-
-	addr = page_pool_get_dma_addr(*pagep);
-	/* Non-XSK always uses PAGE_SIZE. */
-	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
-	return true;
-}
-
 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct page **pagep)
 {
 	dma_addr_t addr;
 
-	if (mlx5e_rx_cache_get(rq, pagep))
-		return 0;
-
 	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
 	if (unlikely(!*pagep))
 		return -ENOMEM;
@@ -353,9 +303,6 @@ void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		if (mlx5e_rx_cache_put(rq, page))
-			return;
-
 		mlx5e_page_dma_unmap(rq, page);
 		page_pool_recycle_direct(rq->page_pool, page);
 	} else {