author     Eelco Chaudron <echaudro@redhat.com>    2020-07-31 14:20:56 +0200
committer  David S. Miller <davem@davemloft.net>   2020-08-03 15:17:48 -0700
commit     9d2f627b7ec9d5d3246b6cec17f290ee6778c83b (patch)
tree       79d4b5e7a6a6f7c49a37585c3b685788ed68e5fd /net
parent     d6526926de7397a97308780911565e31a6b67b59 (diff)
net: openvswitch: add masks cache hit counter
Add a counter that counts the number of masks cache hits, and export it
through the megaflow netlink statistics.

Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/openvswitch/datapath.c    5
-rw-r--r--  net/openvswitch/datapath.h    3
-rw-r--r--  net/openvswitch/flow_table.c  19
-rw-r--r--  net/openvswitch/flow_table.h  3
4 files changed, 23 insertions, 7 deletions
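
The commit message says the counter is exported through the megaflow netlink
statistics, but the uapi header change falls outside this net/-only diffstat.
As a rough sketch only, assuming the companion change appends the field to
struct ovs_dp_megaflow_stats in include/uapi/linux/openvswitch.h, the
userspace-visible layout would be along these lines:

	struct ovs_dp_megaflow_stats {
		__u64 n_mask_hit;   /* Number of masks looked up for flow lookups. */
		__u32 n_masks;      /* Number of masks for the datapath. */
		__u32 pad0;         /* Pad for future expansion. */
		__u64 n_cache_hit;  /* Assumed new field: mask cache hits (this patch). */
		__u64 pad1;         /* Pad for future expansion. */
	};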
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6b6822f82f70..f45fee760504 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;
+ u32 n_cache_hit;
int error;
stats = this_cpu_ptr(dp->stats_percpu);
/* Look up flow. */
flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
- &n_mask_hit);
+ &n_mask_hit, &n_cache_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -262,6 +263,7 @@ out:
u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
+ stats->n_cache_hit += n_cache_hit;
u64_stats_update_end(&stats->syncp);
}
@@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
stats->n_missed += local_stats.n_missed;
stats->n_lost += local_stats.n_lost;
mega_stats->n_mask_hit += local_stats.n_mask_hit;
+ mega_stats->n_cache_hit += local_stats.n_cache_hit;
}
}
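
get_dp_stats() folds the per-CPU counters into the netlink reply with the
u64_stats reader loop, so the new n_cache_hit line above is aggregated exactly
like n_mask_hit. A minimal sketch of that fold as it would sit in
net/openvswitch/datapath.c (hypothetical helper name, assuming the v5.8-era
u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() helpers):

	/* Hypothetical helper: accumulate only the cache-hit counter. */
	static void ovs_fold_cache_hits(const struct datapath *dp,
					struct ovs_dp_megaflow_stats *mega_stats)
	{
		int i;

		for_each_possible_cpu(i) {
			const struct dp_stats_percpu *percpu_stats;
			struct dp_stats_percpu local_stats;
			unsigned int start;

			percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

			/* Re-read until the snapshot is consistent with the writer. */
			do {
				start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
				local_stats = *percpu_stats;
			} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

			mega_stats->n_cache_hit += local_stats.n_cache_hit;
		}
	}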
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 24fcec22fde2..38f7d3e66ca6 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -38,12 +38,15 @@
* @n_mask_hit: Number of masks looked up for flow match.
* @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
* up per packet.
+ * @n_cache_hit: The number of received packets that had their mask found using
+ * the mask cache.
*/
struct dp_stats_percpu {
u64 n_hit;
u64 n_missed;
u64 n_lost;
u64 n_mask_hit;
+ u64 n_cache_hit;
struct u64_stats_sync syncp;
};
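
By the same convention documented for @n_mask_hit above, @n_cache_hit /
(@n_hit + @n_missed) is the fraction of looked-up packets whose mask was found
via the cache. A purely illustrative helper (not part of the patch) showing the
arithmetic next to this struct:

	#include <linux/math64.h>

	/* Hypothetical helper: percentage of packets that hit the mask cache. */
	static inline u64 ovs_cache_hit_pct(const struct dp_stats_percpu *s)
	{
		u64 pkts = s->n_hit + s->n_missed;

		return pkts ? div64_u64(s->n_cache_hit * 100, pkts) : 0;
	}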
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index af22c9ee28dd..a5912ea05352 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct mask_array *ma,
const struct sw_flow_key *key,
u32 *n_mask_hit,
+ u32 *n_cache_hit,
u32 *index)
{
u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
u64_stats_update_begin(&ma->syncp);
usage_counters[*index]++;
u64_stats_update_end(&ma->syncp);
+ (*n_cache_hit)++;
return flow;
}
}
@@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 skb_hash,
- u32 *n_mask_hit)
+ u32 *n_mask_hit,
+ u32 *n_cache_hit)
{
struct mask_array *ma = rcu_dereference(tbl->mask_array);
struct table_instance *ti = rcu_dereference(tbl->ti);
@@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
int seg;
*n_mask_hit = 0;
+ *n_cache_hit = 0;
if (unlikely(!skb_hash)) {
u32 mask_index = 0;
+ u32 cache = 0;
- return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+ return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+ &mask_index);
}
/* Pre and post recirculation flows usually have the same skb_hash
@@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
e = &entries[index];
if (e->skb_hash == skb_hash) {
flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
- &e->mask_index);
+ n_cache_hit, &e->mask_index);
if (!flow)
e->skb_hash = 0;
return flow;
@@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
}
/* Cache miss, do full lookup. */
- flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+ flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+ &ce->mask_index);
if (flow)
ce->skb_hash = skb_hash;
+ *n_cache_hit = 0;
return flow;
}
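
The hunks above also pin down the counting policy: a packet counts as a cache
hit only when the skb_hash cache entry already pointed at the right mask; when
the entry misses and the full lookup repopulates it, *n_cache_hit is zeroed
again so the repopulation is not counted. A compact userspace model of that
policy (all names and types here are illustrative, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	struct lookup_result {
		bool     found;
		uint32_t n_cache_hit;  /* 1 only when the cached mask matched */
	};

	static struct lookup_result lookup_with_cache(uint32_t skb_hash,
						      uint32_t cached_hash,
						      bool cached_mask_matches,
						      bool full_scan_matches)
	{
		struct lookup_result r = { .found = false, .n_cache_hit = 0 };

		if (skb_hash && skb_hash == cached_hash && cached_mask_matches) {
			/* Cache entry pointed at the right mask: one hit. */
			r.found = true;
			r.n_cache_hit = 1;
			return r;
		}

		/* Cache miss: scan all masks, repopulate the entry on success,
		 * and report zero cache hits for this packet.
		 */
		r.found = full_scan_matches;
		return r;
	}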
@@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
u32 __always_unused n_mask_hit;
+ u32 __always_unused n_cache_hit;
u32 index = 0;
- return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+ return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1f664b050e3b..325e939371d8 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
const struct sw_flow_key *,
u32 skb_hash,
- u32 *n_mask_hit);
+ u32 *n_mask_hit,
+ u32 *n_cache_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
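
On the receiving side, userspace only has to read one more member out of the
OVS_DP_ATTR_MEGAFLOW_STATS attribute. A hedged sketch of such a consumer,
assuming the companion uapi change adds n_cache_hit to struct
ovs_dp_megaflow_stats and using libnl's nla_data()/nla_len() accessors:

	#include <linux/openvswitch.h>
	#include <netlink/attr.h>
	#include <stdio.h>
	#include <string.h>

	static void print_megaflow_stats(const struct nlattr *attr)
	{
		struct ovs_dp_megaflow_stats stats;
		int len = nla_len(attr);

		/* Older kernels may send a shorter struct: zero-fill, then copy
		 * at most sizeof(stats) bytes of the attribute payload.
		 */
		memset(&stats, 0, sizeof(stats));
		memcpy(&stats, nla_data(attr),
		       len < (int)sizeof(stats) ? len : (int)sizeof(stats));

		/* n_cache_hit assumes the companion uapi patch is applied. */
		printf("mask hits: %llu, cache hits: %llu\n",
		       (unsigned long long)stats.n_mask_hit,
		       (unsigned long long)stats.n_cache_hit);
	}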