author     Paul Blakey <paulb@nvidia.com>        2021-04-19 15:50:58 +0300
committer  Saeed Mahameed <saeedm@nvidia.com>    2021-05-27 11:54:35 -0700
commit     7fac5c2eced36f335ee19ff316bd3182fbeda823 (patch)
tree       9a6c726c6cc15f9e2dae55078421fd8b9f147c30 /drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
parent     74097a0dcd1e47d3ccdd066422f28300ad508eee (diff)
net/mlx5: CT: Avoid reusing modify header context for natted entries
Currently the driver is designed to reuse modify header context entries. Natted entries will always have a unique modify header, so for them the modify header hashtable lookup only adds overhead: when the hashtable size exceeded 200k entries, the tested insertion rate dropped from ~10k entries/sec to ~300 entries/sec.

Don't use the reuse mechanism when creating modify headers for natted tuples; allocate a dedicated modify header context for them instead.

Signed-off-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
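The core idea, reduced to a minimal user-space C sketch with hypothetical names (cache_get() and alloc_unique() merely stand in for mlx5e_mod_hdr_attach() and mlx5_modify_header_alloc(); they are not driver APIs): when an entry is known to carry a unique rewrite, skip the shared reuse cache and its hashtable lookup and allocate a dedicated object directly.

	/*
	 * Sketch only: illustrates "bypass the reuse cache for entries that
	 * can never be shared". Names are invented for this example.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct mod_hdr { int id; };

	/* Shared path: stand-in for a hashtable lookup + refcounted reuse. */
	static struct mod_hdr *cache_get(int key)
	{
		static struct mod_hdr shared = { .id = 1 };

		(void)key;
		return &shared;
	}

	/* Dedicated path: always allocate fresh, no lookup at all. */
	static struct mod_hdr *alloc_unique(void)
	{
		struct mod_hdr *mh = malloc(sizeof(*mh));

		if (mh)
			mh->id = 2;
		return mh;
	}

	static struct mod_hdr *create_mod_hdr(bool nat, int key)
	{
		/*
		 * Natted entries always have a unique header rewrite, so the
		 * cache can never produce a hit for them; skip it entirely.
		 */
		return nat ? alloc_unique() : cache_get(key);
	}

	int main(void)
	{
		struct mod_hdr *a = create_mod_hdr(true, 0);   /* NAT: direct alloc */
		struct mod_hdr *b = create_mod_hdr(false, 42); /* plain CT: reuse   */

		printf("nat=%d reuse=%d\n", a ? a->id : -1, b ? b->id : -1);
		free(a); /* only the dedicated allocation is owned by the caller */
		return 0;
	}

Teardown then only needs to know which path was taken; in the patch below that is what the new mlx5_tc_ct_entry_destroy_mod_hdr() helper dispatches on, with a NULL mod header handle indicating the dedicated (natted) allocation.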
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 50
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index edf19f1c19ff..e3b0fd78184e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -150,6 +150,11 @@ struct mlx5_ct_entry {
 	unsigned long flags;
 };
 
+static void
+mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+				 struct mlx5_flow_attr *attr,
+				 struct mlx5e_mod_hdr_handle *mh);
+
 static const struct rhashtable_params cts_ht_params = {
 	.head_offset = offsetof(struct mlx5_ct_entry, node),
 	.key_offset = offsetof(struct mlx5_ct_entry, cookie),
@@ -458,8 +463,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 	ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
 
 	mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
-	mlx5e_mod_hdr_detach(ct_priv->dev,
-			     ct_priv->mod_hdr_tbl, zone_rule->mh);
+	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 	kfree(attr);
 }
@@ -686,15 +690,27 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 	if (err)
 		goto err_mapping;
 
-	*mh = mlx5e_mod_hdr_attach(ct_priv->dev,
-				   ct_priv->mod_hdr_tbl,
-				   ct_priv->ns_type,
-				   &mod_acts);
-	if (IS_ERR(*mh)) {
-		err = PTR_ERR(*mh);
-		goto err_mapping;
+	if (nat) {
+		attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
+							    mod_acts.num_actions,
+							    mod_acts.actions);
+		if (IS_ERR(attr->modify_hdr)) {
+			err = PTR_ERR(attr->modify_hdr);
+			goto err_mapping;
+		}
+
+		*mh = NULL;
+	} else {
+		*mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+					   ct_priv->mod_hdr_tbl,
+					   ct_priv->ns_type,
+					   &mod_acts);
+		if (IS_ERR(*mh)) {
+			err = PTR_ERR(*mh);
+			goto err_mapping;
+		}
+		attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
 	}
-	attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
 
 	dealloc_mod_hdr_actions(&mod_acts);
 	return 0;
@@ -705,6 +721,17 @@ err_mapping:
 	return err;
 }
 
+static void
+mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+				 struct mlx5_flow_attr *attr,
+				 struct mlx5e_mod_hdr_handle *mh)
+{
+	if (mh)
+		mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, mh);
+	else
+		mlx5_modify_header_dealloc(ct_priv->dev, attr->modify_hdr);
+}
+
 static int
 mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 			  struct flow_rule *flow_rule,
@@ -767,8 +794,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	return 0;
 
 err_rule:
-	mlx5e_mod_hdr_detach(ct_priv->dev,
-			     ct_priv->mod_hdr_tbl, zone_rule->mh);
+	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
 	kfree(attr);