author    Yevgeny Kliteynik <kliteyn@nvidia.com>  2022-11-08 17:45:03 +0200
committer Saeed Mahameed <saeedm@nvidia.com>      2023-04-20 18:35:48 -0700
commit    72b2cff68405e91ee5e772385b68eb4442bcbf43 (patch)
tree      5574ce696d2159a791972979fdbd925647d2d463 /drivers/net/ethernet/mellanox/mlx5/core/steering
parent    075056005d8cceecbbb8e054d8e3cd2b7519d9c6 (diff)
net/mlx5: DR, Calculate sync threshold of each pool according to its type
When a certain ICM chunk is no longer needed, it has to be freed. Fully freeing ICM memory involves issuing the FW SYNC_STEERING command, which is very time consuming, so it is impractical to do it for every freed chunk.

Instead, we manage these 'freed' chunks in a hot list - a list of chunks that are no longer required by SW, but that HW might still access. When the size of the hot list reaches a certain threshold, we purge it and issue the SYNC_STEERING FW command.

Currently there is one threshold for all the different ICM types, which is not optimal, as different ICM types require different approaches: the STE pool is very large and very 'dynamic' in its nature, so letting its hot list grow too large results in a significant perf hiccup when the hot list is purged. The modify-action pool is much smaller and less dynamic, so its hot list can be allowed to grow to almost the size of the whole pool.

This patch fixes the problem: instead of having the same hot memory threshold for all the pools, the sync operation is triggered according to the ICM type.

Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
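To make the threshold logic concrete, below is a minimal standalone C sketch of the idea, not the kernel code itself: each pool computes its own sync threshold at creation time from a per-type hot-memory percentage, and the sync check then reduces to a single comparison against that precomputed value. The type names, pool sizes, percentages and helpers here are simplified stand-ins for the driver's mlx5dr_icm_* equivalents.

#include <stdio.h>
#include <stdint.h>

/* Hot-memory percentage per ICM pool type (mirrors the patch's defines). */
enum icm_type { ICM_STE, ICM_MODIFY_ACTION, ICM_MODIFY_HDR_PTRN };

static unsigned int hot_mem_percent(enum icm_type type)
{
	switch (type) {
	case ICM_STE:             return 25; /* large, dynamic pool: sync early */
	case ICM_MODIFY_ACTION:   return 90; /* small, static pool: sync late   */
	case ICM_MODIFY_HDR_PTRN: return 50;
	}
	return 25;
}

struct icm_pool {
	enum icm_type type;
	uint64_t pool_size;        /* total pool size in bytes                     */
	uint64_t hot_memory_size;  /* bytes freed by SW but possibly still HW-used */
	uint64_t sync_threshold;   /* precomputed once at pool creation            */
};

/* Compute the per-type threshold once, when the pool is created. */
static void pool_init(struct icm_pool *pool, enum icm_type type, uint64_t pool_size)
{
	pool->type = type;
	pool->pool_size = pool_size;
	pool->hot_memory_size = 0;
	pool->sync_threshold = pool_size * hot_mem_percent(type) / 100;
}

/* SYNC_STEERING is needed only when hot memory exceeds the pool's threshold. */
static int pool_sync_required(const struct icm_pool *pool)
{
	return pool->hot_memory_size > pool->sync_threshold;
}

int main(void)
{
	struct icm_pool ste, action;

	pool_init(&ste, ICM_STE, 1ULL << 30);              /* 1 GiB STE pool    */
	pool_init(&action, ICM_MODIFY_ACTION, 1ULL << 20); /* 1 MiB action pool */

	ste.hot_memory_size = 300ULL << 20;    /* ~29% of the STE pool    */
	action.hot_memory_size = 300ULL << 10; /* ~29% of the action pool */

	printf("STE pool sync required:    %d\n", pool_sync_required(&ste));    /* 1 */
	printf("Action pool sync required: %d\n", pool_sync_required(&action)); /* 0 */
	return 0;
}

With the same relative amount of hot memory, the STE pool already crosses its 25% threshold and triggers a sync, while the modify-action pool is still far below its 90% threshold - exactly the per-type behavior the patch introduces.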
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/steering')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c  33
1 file changed, 18 insertions, 15 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 04fc170a6c16..19e9b4d78454 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,7 +4,9 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_POOL_HOT_MEMORY_FRACTION 4
+#define DR_ICM_POOL_STE_HOT_MEM_PERCENT 25
+#define DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT 50
+#define DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT 90
struct mlx5dr_icm_hot_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
@@ -29,6 +31,8 @@ struct mlx5dr_icm_pool {
struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
u32 hot_chunks_num;
u64 hot_memory_size;
+ /* hot memory size threshold for triggering sync */
+ u64 th;
};
struct mlx5dr_icm_dm {
@@ -330,15 +334,7 @@ dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- int allow_hot_size;
-
- /* sync when hot memory reaches a certain fraction of the pool size */
- allow_hot_size =
- mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) /
- DR_ICM_POOL_HOT_MEMORY_FRACTION;
-
- return pool->hot_memory_size > allow_hot_size;
+ return pool->hot_memory_size > pool->th;
}
static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
@@ -503,8 +499,9 @@ void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_h
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type)
{
- u32 num_of_chunks, entry_size, max_hot_size;
+ u32 num_of_chunks, entry_size;
struct mlx5dr_icm_pool *pool;
+ u32 max_hot_size = 0;
pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
@@ -520,12 +517,21 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
switch (icm_type) {
case DR_ICM_TYPE_STE:
pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
+ max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) *
+ DR_ICM_POOL_STE_HOT_MEM_PERCENT / 100;
break;
case DR_ICM_TYPE_MODIFY_ACTION:
pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
+ max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) *
+ DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT / 100;
break;
case DR_ICM_TYPE_MODIFY_HDR_PTRN:
pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
+ max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) *
+ DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT / 100;
break;
default:
WARN_ON(icm_type);
@@ -533,11 +539,8 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);
- max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) /
- DR_ICM_POOL_HOT_MEMORY_FRACTION;
-
num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
+ pool->th = max_hot_size;
pool->hot_chunks_arr = kvcalloc(num_of_chunks,
sizeof(struct mlx5dr_icm_hot_chunk),