author	Paul Blakey <paulb@nvidia.com>	2023-02-18 00:36:20 +0200
committer	Jakub Kicinski <kuba@kernel.org>	2023-02-20 16:46:10 -0800
commit	6702782845a5bf381a19b204c369e63420041665 (patch)
tree	35d3a3a7992301b0ef4bbe9baaf1f9e1f203db60 /drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
parent	235ff07da7ec2e5addcee68a366ac987571b4d7c (diff)
net/mlx5e: TC, Set CT miss to the specific ct action instance
Currently, CT misses restore the missed chain on the tc skb extension so
tc will continue from the relevant chain. Instead, restore the CT action's
miss cookie on the extension, which will instruct tc to continue from this
specific CT action instance on the relevant filter's action list.

Map the CT action's miss_cookie to a new miss object (ACT_MISS), and use
this miss mapping instead of the current chain miss object (CHAIN_MISS)
for CT action misses.

To restore this new miss mapping value, add an RX restore rule for each
such mapping value.

Signed-off-by: Paul Blakey <paulb@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
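[Editor's note] For reference, the tc skb extension this patch populates looks roughly like the following in the same kernel generation (a sketch of the 6.3-era struct; verify against include/linux/skbuff.h, it is not part of this patch). chain and act_miss_cookie share a union, so the act_miss bit tells tc which interpretation applies:

/* Sketch of the tc skb extension consumed by tc on re-entry (6.3-era
 * field set). On a CT miss, this patch sets act_miss_cookie and
 * act_miss instead of chain.
 */
struct tc_skb_ext {
	union {
		u64 act_miss_cookie;	/* resume from this exact action instance */
		__u32 chain;		/* resume from this chain */
	};
	__u16 mtu;
	__u16 zone;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
	u8 act_miss:1;			/* set if act_miss_cookie is valid */
};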
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_tc.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c	| 64
1 file changed, 57 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2dd0bd54c3cd..e34d9b5fb504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3816,6 +3816,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
 	attr2->parse_attr = parse_attr;
 	attr2->dest_chain = 0;
 	attr2->dest_ft = NULL;
+	attr2->act_id_restore_rule = NULL;
 
 	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
 		attr2->esw_attr->out_count = 0;
@@ -5699,14 +5700,19 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
 	return true;
 }
 
-static bool mlx5e_tc_restore_skb_chain(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
-				       u32 chain, u32 zone_restore_id,
-				       u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
+static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
+					 struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
+					 u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
 {
 	struct mlx5e_priv *priv = netdev_priv(skb->dev);
 	struct tc_skb_ext *tc_skb_ext;
+	u64 act_miss_cookie;
+	u32 chain;
 
-	if (chain) {
+	chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
+	act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
+			  mapped_obj->act_miss_cookie : 0;
+	if (chain || act_miss_cookie) {
 		if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
 			return false;
 
@@ -5716,7 +5722,12 @@ static bool mlx5e_tc_restore_skb_chain(struct sk_buff *skb, struct mlx5_tc_ct_pr
 			return false;
 		}
 
-		tc_skb_ext->chain = chain;
+		if (act_miss_cookie) {
+			tc_skb_ext->act_miss_cookie = act_miss_cookie;
+			tc_skb_ext->act_miss = 1;
+		} else {
+			tc_skb_ext->chain = chain;
+		}
 	}
 
 	if (tc_priv)
@@ -5786,8 +5797,9 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
 
 	switch (mapped_obj.type) {
 	case MLX5_MAPPED_OBJ_CHAIN:
-		return mlx5e_tc_restore_skb_chain(skb, ct_priv, mapped_obj.chain, zone_restore_id,
-						  tunnel_id, tc_priv);
+	case MLX5_MAPPED_OBJ_ACT_MISS:
+		return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
+						    tunnel_id, tc_priv);
 	case MLX5_MAPPED_OBJ_SAMPLE:
 		mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
 		tc_priv->skb_done = true;
@@ -5821,3 +5833,41 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
 	return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
 				   0, NULL);
 }
+
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+				     u64 act_miss_cookie, u32 *act_miss_mapping)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_mapped_obj mapped_obj = {};
+	struct mapping_ctx *ctx;
+	int err;
+
+	ctx = esw->offloads.reg_c0_obj_pool;
+
+	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
+	mapped_obj.act_miss_cookie = act_miss_cookie;
+	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
+	if (err)
+		return err;
+
+	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
+	if (IS_ERR(attr->act_id_restore_rule))
+		goto err_rule;
+
+	return 0;
+
+err_rule:
+	mapping_remove(ctx, *act_miss_mapping);
+	return PTR_ERR(attr->act_id_restore_rule);
+}
+
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+				      u32 act_miss_mapping)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mapping_ctx *ctx;
+
+	ctx = esw->offloads.reg_c0_obj_pool;
+	mlx5_del_flow_rules(attr->act_id_restore_rule);
+	mapping_remove(ctx, act_miss_mapping);
+}
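[Editor's note] For illustration, a caller would pair the two helpers added above roughly as follows (a minimal sketch; example_ct_act_offload and the surrounding flow are hypothetical names for this note, not code from the patch — in the actual series the CT action offload path performs the equivalent):

/* Illustrative only: obtain a reg_c0 miss mapping for a CT action's
 * miss cookie before offloading the flow; the mapping value is what
 * the new RX restore rule later writes back for tc to consume.
 */
static int example_ct_act_offload(struct mlx5e_priv *priv,
				  struct mlx5_flow_attr *attr,
				  u64 act_miss_cookie)
{
	u32 act_miss_mapping;
	int err;

	err = mlx5e_tc_action_miss_mapping_get(priv, attr, act_miss_cookie,
					       &act_miss_mapping);
	if (err)
		return err;

	/* ... program act_miss_mapping as the rule's miss value ... */

	return 0;
}

On teardown, the same caller releases both the restore rule and the mapping with mlx5e_tc_action_miss_mapping_put(priv, attr, act_miss_mapping).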