author     Jarno Rajahalme <jrajahalme@nicira.com>  2014-03-27 12:35:23 -0700
committer  Jesse Gross <jesse@nicira.com>  2014-05-16 13:40:29 -0700
commit     23dabf88abb48a866fdb19ee08ebcf1ddd9b1840 (patch)
tree       cc835b9d88c36d7b3b0c296fb2617f9971c5bd7e  /net/openvswitch/flow_table.c
parent     8c63ff09bddf944ab0033fea97aacfadfffa76de (diff)
openvswitch: Remove 5-tuple optimization.
The 5-tuple optimization becomes unnecessary with a later per-NUMA node stats
patch.  Remove it first to make the changes easier to grasp.

Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
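For reference, here is a sketch of how ovs_flow_alloc() reads once the patch is
applied, assembled from the hunks below: flow stats are now allocated per CPU
unconditionally, and each CPU's copy gets its own spinlock.  The flow_cache
allocation and the err: unwind path are outside the visible hunks and are
filled in here as an assumption, not taken from the diff.

/* Sketch of ovs_flow_alloc() after this patch.  Lines marked "assumed"
 * are not part of the visible hunks. */
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	int cpu;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);	/* assumed */
	if (!flow)
		return ERR_PTR(-ENOMEM);			/* assumed */

	flow->sf_acts = NULL;
	flow->mask = NULL;

	/* Stats are now unconditionally per-CPU. */
	flow->stats = alloc_percpu(struct flow_stats);
	if (!flow->stats)
		goto err;

	/* Give each possible CPU's copy its own lock for its counters. */
	for_each_possible_cpu(cpu) {
		struct flow_stats *cpu_stats;

		cpu_stats = per_cpu_ptr(flow->stats, cpu);
		spin_lock_init(&cpu_stats->lock);
	}
	return flow;
err:
	kmem_cache_free(flow_cache, flow);			/* assumed unwind */
	return ERR_PTR(-ENOMEM);				/* assumed */
}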
Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r--  net/openvswitch/flow_table.c  31
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 1ba1e0b8ade5..aa92da23053d 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -72,7 +72,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
*d++ = *s++ & *m++;
}
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
int cpu;
@@ -84,25 +84,15 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
flow->sf_acts = NULL;
flow->mask = NULL;
- flow->stats.is_percpu = percpu_stats;
+ flow->stats = alloc_percpu(struct flow_stats);
+ if (!flow->stats)
+ goto err;
- if (!percpu_stats) {
- flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
- if (!flow->stats.stat)
- goto err;
+ for_each_possible_cpu(cpu) {
+ struct flow_stats *cpu_stats;
- spin_lock_init(&flow->stats.stat->lock);
- } else {
- flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
- if (!flow->stats.cpu_stats)
- goto err;
-
- for_each_possible_cpu(cpu) {
- struct flow_stats *cpu_stats;
-
- cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
- spin_lock_init(&cpu_stats->lock);
- }
+ cpu_stats = per_cpu_ptr(flow->stats, cpu);
+ spin_lock_init(&cpu_stats->lock);
}
return flow;
err:
@@ -141,10 +131,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
static void flow_free(struct sw_flow *flow)
{
kfree((struct sf_flow_acts __force *)flow->sf_acts);
- if (flow->stats.is_percpu)
- free_percpu(flow->stats.cpu_stats);
- else
- kfree(flow->stats.stat);
+ free_percpu(flow->stats);
kmem_cache_free(flow_cache, flow);
}
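And the resulting flow_free(), assembled directly from the last hunk: freeing
now mirrors allocation with a single unconditional free_percpu(), replacing the
old is_percpu branch.

static void flow_free(struct sw_flow *flow)
{
	kfree((struct sf_flow_acts __force *)flow->sf_acts);
	/* Matches the unconditional alloc_percpu() in ovs_flow_alloc(). */
	free_percpu(flow->stats);
	kmem_cache_free(flow_cache, flow);
}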