Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 337 ++++++++++-----------------
 1 file changed, 112 insertions(+), 225 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c981fa77f439..2a98375a0abf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -179,15 +179,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_table *ft;
if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
- ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ ft = mlx5_esw_indir_table_get(esw, attr,
mlx5_esw_indir_table_decap_vport(attr), true);
return PTR_ERR_OR_ZERO(ft);
}
@@ -197,7 +196,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
if (mlx5_esw_indir_table_decap_vport(attr))
- mlx5_esw_indir_table_put(esw, attr,
+ mlx5_esw_indir_table_put(esw,
mlx5_esw_indir_table_decap_vport(attr),
true);
}
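The spec argument is dropped through the whole indirect-table call chain,
so the helper prototypes shrink accordingly. A sketch of the declarations
implied by the new call sites (the real ones live in esw/indir_table.h;
parameter names here are assumptions):

    struct mlx5_flow_table *
    mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
                             struct mlx5_flow_attr *attr,
                             u16 vport, bool decap);
    void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
                                  u16 vport, bool decap);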
@@ -235,7 +234,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -243,7 +241,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
dest[i].ft = attr->dest_ft;
if (mlx5_esw_indir_table_decap_vport(attr))
- return esw_setup_decap_indir(esw, attr, spec);
+ return esw_setup_decap_indir(esw, attr);
return 0;
}
@@ -298,7 +296,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
mlx5_chains_put_table(chains, 0, 1, 0);
else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
- mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
false);
}
@@ -384,7 +382,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
bool ignore_flow_lvl,
int *i)
{
@@ -399,7 +396,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
esw_attr->dests[j].rep->vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
@@ -408,7 +405,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
}
if (mlx5_esw_indir_table_decap_vport(attr)) {
- err = esw_setup_decap_indir(esw, attr, spec);
+ err = esw_setup_decap_indir(esw, attr);
if (err)
goto err_indir_tbl_get;
}
@@ -446,7 +443,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
- mlx5_lag_mpesw_is_activated(esw->dev))
+ mlx5_lag_is_mpesw(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
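Note the LAG predicate rename in this hunk: mlx5_lag_mpesw_is_activated()
becomes mlx5_lag_is_mpesw(), matching the rest of the mlx5_lag_is_*()
query family. Condensed, the fixup reads as below (hypothetical standalone
helper for illustration; the real check stays inline in
esw_setup_vport_dest()):

    static void esw_dest_prefer_uplink(struct mlx5_flow_destination *dest,
                                       struct mlx5_eswitch *esw)
    {
            /* In multi-port E-Switch (MPESW) LAG mode, uplink-bound
             * traffic uses the dedicated UPLINK destination type.
             */
            if (dest->vport.num == MLX5_VPORT_UPLINK &&
                mlx5_lag_is_mpesw(esw->dev))
                    dest->type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
    }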
@@ -511,14 +508,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
if (attr->dest_ft) {
- err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
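For orientation, the destination dispatch in esw_setup_dests() now reads
as follows (comment sketch condensed from the hunk above; the meter
branch condition sits outside the shown context):

    /*
     * 1. flow meter attached      -> esw_setup_mtu_dest()
     * 2. indirect table needed    -> esw_setup_indir_table()  (no spec)
     * 3. chain src-port rewrite   -> esw_setup_chain_src_port_rewrite()
     * 4. otherwise vport dests    -> esw_setup_vport_dests(), then
     *    attr->dest_ft            -> esw_setup_ft_dest()      (no spec)
     *    or attr->dest_chain      -> esw_setup_chain_dest()
     */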
@@ -582,16 +579,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
flow_act.action = attr->action;
- /* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
- MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
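The vlan-actions capability check moves ahead of any allocation and turns
from an emulation fallback into a hard failure: as the hunk reads, adding
an offloaded rule on hardware without vlan push/pop support now returns
-EOPNOTSUPP up front instead of masking the vlan bits out of
flow_act.action and emulating them (the emulation code is deleted further
down). From a caller's perspective (illustrative only):

    handle = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
    if (IS_ERR(handle))
            return PTR_ERR(handle); /* -EOPNOTSUPP when the device lacks
                                     * vlan push/pop action support */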
@@ -727,7 +724,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
if (esw_is_indir_table(esw, attr))
- err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+ err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
&i);
@@ -832,204 +829,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
-static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
-{
- struct mlx5_eswitch_rep *rep;
- unsigned long i;
- int err = 0;
-
- esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
- if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
- continue;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
- if (err)
- goto out;
- }
-
-out:
- return err;
-}
-
-static struct mlx5_eswitch_rep *
-esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push)
- vport = in_rep;
- else if (pop)
- vport = out_rep;
- else
- vport = in_rep;
-
- return vport;
-}
-
-static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
- bool push, bool pop, bool fwd)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep;
-
- if ((push || pop) && !fwd)
- goto out_notsupp;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push && in_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
- if (!push && !pop && fwd)
- if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* protects against (1) setting rules with different vlans to push and
- * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
- */
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
- goto out_notsupp;
-
- return 0;
-
-out_notsupp:
- return -EOPNOTSUPP;
-}
-
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
- !attr->dest_chain);
-
- mutex_lock(&esw->state_lock);
-
- err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
- if (err)
- goto unlock;
-
- attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
- vport->vlan_refcount++;
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
- }
-
- goto unlock;
- }
-
- if (!push && !pop)
- goto unlock;
-
- if (!(offloads->vlan_push_pop_refcount)) {
- /* it's the 1st vlan rule, apply global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
- offloads->vlan_push_pop_refcount++;
-
- if (push) {
- if (vport->vlan_refcount)
- goto skip_set_push;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
- 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
- if (err)
- goto out;
- vport->vlan = esw_attr->vlan_vid[0];
-skip_set_push:
- vport->vlan_refcount++;
- }
-out:
- if (!err)
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
-unlock:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-
- mutex_lock(&esw->state_lock);
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
- vport->vlan_refcount--;
-
- goto out;
- }
-
- if (push) {
- vport->vlan_refcount--;
- if (vport->vlan_refcount)
- goto skip_unset_push;
-
- vport->vlan = 0;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
- 0, 0, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
-
-skip_unset_push:
- offloads->vlan_push_pop_refcount--;
- if (offloads->vlan_push_pop_refcount)
- goto out;
-
- /* no more vlan rules, stop global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, 0);
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_eswitch *from_esw,
@@ -2406,7 +2205,7 @@ static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
kfree(rep);
}
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -2416,7 +2215,7 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
xa_destroy(&esw->offloads.vport_reps);
}
-int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
@@ -2436,6 +2235,94 @@ err:
return err;
}
+static int esw_port_metadata_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err = 0;
+
+ down_write(&esw->mode_lock);
+ if (mlx5_esw_is_fdb_created(esw)) {
+ err = -EBUSY;
+ goto done;
+ }
+ if (!mlx5_esw_vport_match_metadata_supported(esw)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ if (ctx->val.vbool)
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ else
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
+done:
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+static int esw_port_metadata_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ return 0;
+}
+
+static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 esw_mode;
+
+ esw_mode = mlx5_eswitch_mode(dev);
+ if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch must either disabled or non switchdev mode");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static const struct devlink_param esw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ esw_port_metadata_get,
+ esw_port_metadata_set,
+ esw_port_metadata_validate),
+};
+
+int esw_offloads_init(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = esw_offloads_init_reps(esw);
+ if (err)
+ return err;
+
+ err = devl_params_register(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+ if (err)
+ goto err_params;
+
+ return 0;
+
+err_params:
+ esw_offloads_cleanup_reps(esw);
+ return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+{
+ devl_params_unregister(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+ esw_offloads_cleanup_reps(esw);
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -3575,9 +3462,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return err;
}
@@ -3675,9 +3562,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return err;
}
@@ -3749,9 +3636,9 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return 0;
}
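The final three hunks relax esw->mode_lock from exclusive to shared for
the devlink getters: reading the eswitch mode, inline mode, or encap mode
only needs a stable snapshot, so concurrent "devlink dev eswitch show"
queries no longer serialize against each other, while mode transitions
keep taking the lock exclusively. The resulting pattern (illustrative):

    down_read(&esw->mode_lock);        /* getters: shared access */
    err = esw_mode_to_devlink(esw->mode, mode);
    up_read(&esw->mode_lock);

    down_write(&esw->mode_lock);       /* mode changes: exclusive */
    /* ... legacy <-> switchdev transition ... */
    up_write(&esw->mode_lock);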