Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 682
1 file changed, 457 insertions(+), 225 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index bdfe609cc9ec..752fb0dfb111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -375,7 +375,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- bool ignore_flow_lvl,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -385,8 +384,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
- if (ignore_flow_lvl)
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
@@ -424,10 +422,51 @@ esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 l
mlx5_chains_put_table(chains, chain, prio, level);
}
+static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
+{
+ return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
+}
+
+static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx)
+{
+ if (esw->offloads.ft_ipsec_tx_pol &&
+ esw_attr->dests[attr_idx].rep &&
+ esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+ /* To be aligned with software, encryption is needed only for tunnel device */
+ (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
+ esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+ esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
+ return true;
+
+ return false;
+}
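
For readability, the conditions above can be restated as a comment (a summary only, not part of the patch):

    /* A destination is detoured to the IPsec TX policy table only when:
     *   1. the uplink IPsec TX policy table (ft_ipsec_tx_pol) exists,
     *   2. the destination rep is the uplink,
     *   3. the destination carries a valid encap, i.e. tunnel traffic,
     *      matching the software model,
     *   4. the flow does not originate from the uplink itself, and
     *   5. source and destination share a vhca_id (same device).
     */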
+
+static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *esw_attr)
+{
+ int i;
+
+ if (!esw->offloads.ft_ipsec_tx_pol)
+ return true;
+
+ for (i = 0; i < esw_attr->split_count; i++)
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
+ return false;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
+ (esw_attr->out_count - esw_attr->split_count > 1))
+ return false;
+
+ return true;
+}
+
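
Net effect of the two loops above: with the policy table installed, a flow may not mirror (split destinations) into an IPsec-bound uplink, and an IPsec-bound uplink forward cannot be combined with any additional destination. A summary sketch, not part of the patch:

    /* Assuming ft_ipsec_tx_pol is set and the uplink dest needs IPsec:
     *   single forward to uplink            -> true  (offloadable)
     *   forward to uplink + a second dest   -> false (multi-dest)
     *   uplink among mirrored (split) dests -> false (mirroring)
     *   no IPsec-bound uplink dest at all   -> true
     */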
static void
-esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
- struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
- int attr_idx, int dest_idx, bool pkt_reformat)
+esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
@@ -449,6 +488,33 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
}
}
+static void
+esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (pkt_reformat &&
+ esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+}
+
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+ esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
+ attr_idx, dest_idx, pkt_reformat);
+ else
+ esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
+ attr_idx, dest_idx, pkt_reformat);
+}
+
static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
@@ -469,6 +535,28 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}
+static bool
+esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+{
+ bool vf_dest = false, pf_dest = false;
+ int i;
+
+ for (i = 0; i < max_dest; i++) {
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ continue;
+
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK)
+ pf_dest = true;
+ else
+ vf_dest = true;
+
+ if (vf_dest && pf_dest)
+ return true;
+ }
+
+ return false;
+}
+
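
esw_dests_to_vf_pf_vports() flags the wire+loopback mix: at least one vport destination is the uplink (PF/wire) and at least one is any other vport (VF). Illustrative inputs, not from the patch:

    /* dest[] = { VPORT(MLX5_VPORT_UPLINK), VPORT(vf0) }     -> true
     * dest[] = { VPORT(vf0), VPORT(vf1) }                   -> false
     * dest[] = { VPORT(MLX5_VPORT_UPLINK), FLOW_TABLE(ft) } -> false
     */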
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -501,7 +589,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
@@ -575,6 +663,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return ERR_PTR(-EOPNOTSUPP);
+ if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
@@ -602,6 +693,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
rule = ERR_PTR(err);
goto err_create_goto_table;
}
+
+ /* Header rewrite with combined wire+loopback in FDB is not allowed */
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
+ esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_warn(esw->dev,
+ "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ rule = ERR_PTR(-EINVAL);
+ goto err_esw_get;
+ }
}
if (esw_attr->decap_pkt_reformat)
@@ -884,10 +984,17 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
- dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport.num = rep->vport;
- dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
- dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
+ dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ } else {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = rep->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ }
+
if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
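
Summarizing the destination selection above (a reading of the code; the flow-level remark is an inference from the flag's general meaning, not stated in the patch):

    /* rep->vport == UPLINK and ft_ipsec_tx_pol present:
     *     dest = FLOW_TABLE(ft_ipsec_tx_pol), with
     *     FLOW_ACT_IGNORE_FLOW_LEVEL set so the rule may jump to a
     *     table that is not at a strictly higher level.
     * otherwise:
     *     dest = VPORT(rep->vport, vhca_id)
     */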
@@ -1436,7 +1547,6 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
esw_init_chains_offload_flags(esw, &attr.flags);
attr.ns = MLX5_FLOW_NAMESPACE_FDB;
- attr.fs_base_prio = FDB_TC_OFFLOAD;
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
attr.mapping = esw->offloads.reg_c0_obj_pool;
@@ -2391,7 +2501,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2415,7 +2525,7 @@ err_reps:
return err;
}
-void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2425,39 +2535,63 @@ void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
+
+ return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
+}
+
+void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
+}
+
+int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_devlink_port *dl_port,
+ u32 controller, u32 sfnum)
+{
+ return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
+}
+
+void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
+}
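
These wrappers split devlink-port init/cleanup from rep load/unload so the eswitch core can sequence them around vport enablement. The assumed ordering (the caller lives in eswitch.c, outside this diff):

    /* Assumed PF/VF sequencing, not shown in this patch:
     *   mlx5_esw_offloads_init_pf_vf_rep()  - devlink port structures
     *   vport enable                        - HW side
     *   mlx5_esw_offloads_load_rep()        - register port + load rep
     * and the mirror order on teardown.
     */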
+
+int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
- if (vport_num != MLX5_VPORT_UPLINK) {
- err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
- if (err)
- return err;
- }
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport);
+ if (err)
+ return err;
- err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ err = mlx5_esw_offloads_rep_load(esw, vport->vport);
if (err)
goto load_err;
return err;
load_err:
- if (vport_num != MLX5_VPORT_UPLINK)
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport);
return err;
}
-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
- mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_offloads_rep_unload(esw, vport->vport);
- if (vport_num != MLX5_VPORT_UPLINK)
- mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport);
}
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
@@ -2779,9 +2913,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw,
bool pair)
{
- u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
+ u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+ u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
struct mlx5_flow_root_namespace *peer_ns;
- u8 idx = mlx5_get_dev_index(esw->dev);
struct mlx5_flow_root_namespace *ns;
int err;
@@ -2789,18 +2923,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
ns = esw->dev->priv.steering->fdb_root_ns;
if (pair) {
- err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
+ err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
if (err)
return err;
- err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
+ err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
if (err) {
- mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
+ mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
return err;
}
} else {
- mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
- mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
+ mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
+ mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
}
return 0;
@@ -2811,7 +2945,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
- struct mlx5_devcom *devcom = esw->dev->priv.devcom;
struct mlx5_eswitch *peer_esw = event_data;
u16 esw_i, peer_esw_i;
bool esw_paired;
@@ -2833,6 +2966,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
+
err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err)
goto err_peer;
@@ -2851,7 +2985,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
esw->num_peers++;
peer_esw->num_peers++;
- mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+ mlx5_devcom_comp_set_ready(esw->devcom, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
@@ -2861,7 +2995,7 @@ static int mlx5_esw_offloads_devcom_event(int event,
peer_esw->num_peers--;
esw->num_peers--;
if (!esw->num_peers && !peer_esw->num_peers)
- mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ mlx5_devcom_comp_set_ready(esw->devcom, false);
xa_erase(&peer_esw->paired, esw_i);
xa_erase(&esw->paired, peer_esw_i);
mlx5_esw_offloads_unpair(peer_esw, esw);
@@ -2886,9 +3020,8 @@ err_out:
return err;
}
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
{
- struct mlx5_devcom *devcom = esw->dev->priv.devcom;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
@@ -2898,38 +3031,44 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
- if (!mlx5_lag_is_supported(esw->dev))
+ if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
+ !mlx5_lag_is_supported(esw->dev))
return;
xa_init(&esw->paired);
- mlx5_devcom_register_component(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- mlx5_esw_offloads_devcom_event,
- esw);
-
esw->num_peers = 0;
- mlx5_devcom_send_event(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
+ esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ key,
+ mlx5_esw_offloads_devcom_event,
+ esw);
+ if (IS_ERR_OR_NULL(esw->devcom))
+ return;
+
+ mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_PAIR,
- ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+ ESW_OFFLOADS_DEVCOM_UNPAIR,
+ esw);
}
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- struct mlx5_devcom *devcom = esw->dev->priv.devcom;
-
- if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
- return;
-
- if (!mlx5_lag_is_supported(esw->dev))
+ if (IS_ERR_OR_NULL(esw->devcom))
return;
- mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_UNPAIR,
- ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+ ESW_OFFLOADS_DEVCOM_UNPAIR,
+ esw);
- mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_unregister_component(esw->devcom);
xa_destroy(&esw->paired);
+ esw->devcom = NULL;
+}
+
+bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
+{
+ return mlx5_devcom_comp_is_ready(esw->devcom);
+}
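
A hedged caller sketch for the new readiness helper; the call site is an assumption, not part of this hunk:

    /* e.g. in representor/TC code, before acting on a peer eswitch: */
    if (!mlx5_esw_offloads_devcom_is_ready(esw))
            return;    /* devcom component not paired yet */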
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
@@ -3356,7 +3495,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
/* Uplink vport rep must load first. */
- err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
+ err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
if (err)
goto err_uplink;
@@ -3367,7 +3506,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
return 0;
err_vports:
- esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
@@ -3405,7 +3544,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_eswitch_disable_pf_vf_vports(esw);
- esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+ mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mapping_destroy(esw->offloads.reg_c0_obj_pool);
@@ -3495,13 +3634,43 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
struct net *devl_net, *netdev_net;
struct mlx5_eswitch *esw;
- esw = mlx5_devlink_eswitch_get(devlink);
+ esw = mlx5_devlink_eswitch_nocheck_get(devlink);
netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
return net_eq(devl_net, netdev_net);
}
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err;
+
+ if (!mlx5_esw_allowed(esw))
+ return 0;
+
+ /* Take TC into account */
+ err = mlx5_esw_try_lock(esw);
+ if (err < 0)
+ return err;
+
+ esw->offloads.num_block_mode++;
+ mlx5_esw_unlock(esw);
+ return 0;
+}
+
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ return;
+
+ down_write(&esw->mode_lock);
+ esw->offloads.num_block_mode--;
+ up_write(&esw->mode_lock);
+}
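
mlx5_devlink_eswitch_mode_set() below consults num_block_mode and refuses legacy/switchdev transitions while it is non-zero. A usage sketch, assuming the IPsec SA/policy code is the intended caller:

    err = mlx5_eswitch_block_mode(mdev);   /* pin the current eswitch mode */
    if (err)
            return err;                    /* mode_lock busy (TC in flight) */
    /* ... IPsec SA or policy lives here ... */
    mlx5_eswitch_unblock_mode(mdev);       /* allow mode changes again */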
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3535,6 +3704,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ if (esw->offloads.num_block_mode) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change eswitch mode when IPsec SA and/or policies are configured");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
@@ -3694,38 +3870,28 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- devl_unlock(devlink);
- /* Failure means no eswitch => not possible to change encap */
+ if (!mlx5_esw_allowed(esw))
return true;
- }
down_write(&esw->mode_lock);
if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return false;
}
esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
@@ -3921,38 +4087,6 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 controller, u32 sfnum)
-{
- int err;
-
- err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
- if (err)
- return err;
-
- err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
- if (err)
- goto devlink_err;
-
- err = mlx5_esw_offloads_rep_load(esw, vport_num);
- if (err)
- goto rep_err;
- return 0;
-
-rep_err:
- mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
-devlink_err:
- mlx5_esw_vport_disable(esw, vport_num);
- return err;
-}
-
-void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
-{
- mlx5_esw_offloads_rep_unload(esw, vport_num);
- mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
- mlx5_esw_vport_disable(esw, vport_num);
-}
-
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -4041,35 +4175,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
-static bool
-is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num) ||
- mlx5_esw_is_sf_vport(esw, vport_num);
-}
-
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return -EOPNOTSUPP;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
ether_addr_copy(hw_addr, vport->info.mac);
@@ -4082,100 +4193,55 @@ int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
- return PTR_ERR(esw);
- }
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num)) {
- NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
- return -EINVAL;
- }
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
- return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
-}
-
-static struct mlx5_vport *
-mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
-{
- u16 vport_num;
-
- if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- return ERR_PTR(-EOPNOTSUPP);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return ERR_PTR(-EOPNOTSUPP);
-
- return mlx5_eswitch_get_vport(esw, vport_num);
+ return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
}
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
- return err;
+ return -EOPNOTSUPP;
}
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- *is_enabled = vport->info.mig_enabled;
- err = 0;
- }
+ *is_enabled = vport->info.mig_enabled;
mutex_unlock(&esw->state_lock);
- return err;
+ return 0;
}
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
void *query_ctx;
void *hca_caps;
- int err = -EOPNOTSUPP;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ int err;
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
- return err;
+ return -EOPNOTSUPP;
}
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
- if (!vport->enabled) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- goto out;
- }
if (vport->info.mig_enabled == enable) {
err = 0;
@@ -4196,7 +4262,7 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
+ MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
@@ -4217,56 +4283,37 @@ out:
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- *is_enabled = vport->info.roce_enabled;
- err = 0;
- }
+ *is_enabled = vport->info.roce_enabled;
mutex_unlock(&esw->state_lock);
- return err;
+ return 0;
}
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
+ struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
+ u16 vport_num = vport->vport;
void *query_ctx;
void *hca_caps;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
+ int err;
- vport = mlx5_devlink_port_fn_get_vport(port, esw);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
}
- vport_num = vport->vport;
mutex_lock(&esw->state_lock);
- if (!vport->enabled) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- goto out;
- }
if (vport->info.roce_enabled == enable) {
err = 0;
@@ -4304,3 +4351,188 @@ out:
mutex_unlock(&esw->state_lock);
return err;
}
+
+int
+mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
+{
+ struct mlx5_flow_destination new_dest = {};
+ struct mlx5_flow_destination old_dest = {};
+
+ if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+ return 0;
+
+ esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+ esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+
+ return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
+}
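
A hedged sketch of the expected cleanup path when the IPsec TX policy table is torn down; the loop and the flow bookkeeping names are illustrative, not from this patch:

    /* for each offloaded flow that was detoured through ft_ipsec_tx_pol: */
    err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule,
                                          flow->esw_attr, dest_idx);
    if (err)
            esw_warn(esw->dev, "failed to restore IPsec rule destination\n");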
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_crypto_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec crypto");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_crypto_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
+ goto unlock;
+ }
+
+ vport->info.ipsec_crypto_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+ bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec packet mode");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_packet_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set IPsec packet mode");
+ goto unlock;
+ }
+
+ vport->info.ipsec_packet_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+#endif /* CONFIG_XFRM_OFFLOAD */
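
For reference, the callbacks above back the devlink port-function IPsec attributes. Assuming the standard iproute2 syntax for these knobs (the port index is hypothetical), userspace drives them roughly as follows:

    /* devlink port function set pci/0000:06:00.0/1 ipsec_crypto enable
     * devlink port function set pci/0000:06:00.0/1 ipsec_packet enable
     * devlink port function show pci/0000:06:00.0/1
     *
     * Both setters require the vport to be enabled and return -EBUSY when
     * the device already has active IPsec offloads while no VF has the
     * feature enabled yet.
     */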