author		Roi Dayan <roid@nvidia.com>	2023-05-04 12:14:00 +0300
committer	Saeed Mahameed <saeedm@nvidia.com>	2023-07-27 11:37:28 -0700
commit		88d162b479815f5d6b6a4ff5fdb07aec9dc6280c (patch)
tree		c6caaa69dac21e47c529a42c6e6cf4c061148ff0 /drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
parent		02ceda65f014e357d4142e9c004f40b3b3d61857 (diff)
net/mlx5: Devcom, Infrastructure changes
Update devcom infrastructure to be more generic, without depending on
max supported ports definition or a device guid, and also more
encapsulated so callers don't need to pass the registered devcom
component id per event call.

Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
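In caller terms the change looks roughly like the snippets below. This is a
hypothetical before/after sketch condensed from the en_tc.c hunks that
follow; `mdev` stands in for any `struct mlx5_core_dev *` and
`do_something()` is a placeholder for per-peer work, neither appears in the
patch itself.

	/* Before: callers fetched the global devcom handle and passed an
	 * explicit component id plus an integer cursor on every call.
	 */
	struct mlx5_devcom *devcom = mdev->priv.devcom;
	struct mlx5_eswitch *peer_esw;
	int i;

	if (mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
		mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
						peer_esw, i)
			do_something(peer_esw);
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	}

	/* After: the eswitch holds its registered component handle, so the
	 * component id argument disappears and the cursor becomes a
	 * struct mlx5_devcom_comp_dev pointer.
	 */
	struct mlx5_devcom_comp_dev *devcom = mdev->priv.eswitch->devcom, *pos;

	if (mlx5_devcom_for_each_peer_begin(devcom)) {
		mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos)
			do_something(peer_esw);
		mlx5_devcom_for_each_peer_end(devcom);
	}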
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_tc.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c	36
1 file changed, 15 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 8d0a3f69693e..22bc88620653 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 {
 	struct mlx5e_priv *out_priv, *route_priv;
 	struct mlx5_core_dev *route_mdev;
-	struct mlx5_devcom *devcom;
+	struct mlx5_devcom_comp_dev *pos;
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
 	int err;
-	int i;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 		return err;
 
 	rcu_read_lock();
-	devcom = out_priv->mdev->priv.devcom;
 	err = -ENODEV;
-	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
-					    esw, i) {
+	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
 		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 		if (!err)
 			break;
@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	if (mlx5e_is_eswitch_flow(flow)) {
-		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
+		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
 
-		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
 
 		mlx5e_tc_del_fdb_peers_flow(flow);
-		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(devcom);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
 			     flow_flag_test(flow, INGRESS);
 	bool act_is_encap = !!(attr->action &
 			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
-	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
-						    MLX5_DEVCOM_ESW_OFFLOADS);
+	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
 
 	if (!esw_paired)
 		return false;
@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		   struct net_device *filter_dev,
 		   struct mlx5e_tc_flow **__flow)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
 	struct mlx5_core_dev *in_mdev = priv->mdev;
 	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	int err;
-	int i;
 
 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
 				    in_mdev);
@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		return 0;
 	}
 
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
 		err = -ENODEV;
 		goto clean_flow;
 	}
 
-	mlx5_devcom_for_each_peer_entry(devcom,
-					MLX5_DEVCOM_ESW_OFFLOADS,
-					peer_esw, i) {
+	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
 		if (err)
 			goto peer_clean;
 	}
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 
 	*__flow = flow;
 
 	return 0;
 
 peer_clean:
 	mlx5e_tc_del_fdb_peers_flow(flow);
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom);
 clean_flow:
 	mlx5e_tc_del_fdb_flow(priv, flow);
 	return err;
@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 		       struct flow_cls_offload *f, unsigned long flags)
 {
-	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	/* Under multipath it's possible for one rule to be currently
 	 * un-offloaded while the other rule is offloaded.
 	 */
-	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
 		goto out;
 
 	if (flow_flag_test(flow, DUP)) {
@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	}
 
 no_peer_counter:
-	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (esw)
+		mlx5_devcom_for_each_peer_end(esw->devcom);
 out:
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
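One design consequence visible in the last two hunks: the old global handle
(priv->mdev->priv.devcom) existed for every device, while the new one hangs
off the eswitch object, which presumably can be absent on this path. The
stats code therefore gains an explicit `esw &&` guard before dereferencing
esw->devcom, and the matching mlx5_devcom_for_each_peer_end() is wrapped in
`if (esw)` so the begin/end pair stays balanced.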