summaryrefslogtreecommitdiff
path: root/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
diff options
context:
space:
mode:
authorPetr Machata <petrm@mellanox.com>2018-09-20 09:21:31 +0300
committerDavid S. Miller <davem@davemloft.net>2018-09-20 07:46:01 -0700
commit5be3637e069fe70a9ba6094970edfabaae1d0f1c (patch)
tree4b0ed3333f24428eac081f8adf1fd256baf888c9 /drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
parent41057e28288b89da91d535ff3244bfcf90ebc97c (diff)
mlxsw: spectrum_buffers: Allow configuration of static pools
Some pools configured through the sb_pm entries may have by default static size. The MC pool is now not explicitly configured, however it gets configured as static implicitly by 0-initializing sb->prs, and a follow-up patch adds an explicit configuration to the same effect. To support this, pass max_buff taken from sb_pm and sb_cm entries through cell conversion before handing it to mlxsw_sp_sb_pm_write(), if the pool that the sb_pm entry configures is statically-sized. To keep current behavior, update mlxsw_sp_sb_cms_egress[] to denote buffer sizes in bytes (assuming Spectrum 1 cell sizes, which the original code assumed as well) instead of cells. Note that a follow-up patch changes this to infinite size. Also tweak a comment at SBMM configuration to remain true now that statically-sized pools exist. Signed-off-by: Petr Machata <petrm@mellanox.com> Reviewed-by: Jiri Pirko <jiri@mellanox.com> Signed-off-by: Ido Schimmel <idosch@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c')
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c50
1 file changed, 33 insertions, 17 deletions
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 021b7c219f4e..c7b453f7881c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -380,14 +380,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
MLXSW_SP_SB_CM(1500, 9, 4),
MLXSW_SP_SB_CM(1500, 9, 4),
MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
- MLXSW_SP_SB_CM(0, 140000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
+ MLXSW_SP_SB_CM(0, 13440000, 8),
MLXSW_SP_SB_CM(1, 0xff, 4),
};
@@ -433,6 +433,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+static bool
+mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
+
+ return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
+}
+
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
@@ -444,6 +452,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
u32 min_buff;
+ u32 max_buff;
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
@@ -451,18 +460,21 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
continue;
- /* All pools are initialized using dynamic thresholds,
- * therefore 'max_buff' isn't specified in cells.
- */
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
- if (cm->max_buff == MLXSW_SP_SB_INFI)
+ max_buff = cm->max_buff;
+ if (max_buff == MLXSW_SP_SB_INFI) {
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
min_buff, 0,
true, cm->pool_index);
- else
+ } else {
+ if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
+ cm->pool_index))
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
+ max_buff);
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
- min_buff, cm->max_buff,
+ min_buff, max_buff,
false, cm->pool_index);
+ }
if (err)
return err;
}
@@ -523,11 +535,15 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
+ u32 max_buff;
u32 min_buff;
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
+ max_buff = pm->max_buff;
+ if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
+ max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
- i, min_buff, pm->max_buff);
+ i, min_buff, max_buff);
if (err)
return err;
}
@@ -580,8 +596,8 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
mc = &mlxsw_sp_sb_mms[i];
des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
- /* All pools are initialized using dynamic thresholds,
- * therefore 'max_buff' isn't specified in cells.
+ /* All pools used by sb_mm's are initialized using dynamic
+ * thresholds, therefore 'max_buff' isn't specified in cells.
*/
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,