Diffstat (limited to 'net/openvswitch/flow_table.c')
-rw-r--r--	net/openvswitch/flow_table.c	14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4f3b1798e0b2..ffc72a741a50 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -107,16 +107,15 @@ int ovs_flow_tbl_count(const struct flow_table *table)
 
 static void flow_free(struct sw_flow *flow)
 {
-	int cpu;
+	unsigned int cpu;
 
 	if (ovs_identifier_is_key(&flow->id))
 		kfree(flow->id.unmasked_key);
 	if (flow->sf_acts)
 		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
 					  flow->sf_acts);
-	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids;
-	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
+
+	for_each_cpu(cpu, flow->cpu_used_mask) {
 		if (flow->stats[cpu])
 			kmem_cache_free(flow_stats_cache,
 					(struct sw_flow_stats __force *)flow->stats[cpu]);
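Note: for_each_cpu(), from <linux/cpumask.h>, visits only the CPUs whose bits
are set in the mask, whereas the removed open-coded loop unconditionally
started at CPU 0. A simplified sketch of the difference (illustrative only,
not the exact kernel macro; handle_cpu() is a hypothetical placeholder):

	unsigned int cpu;

	/* Old open-coded form: CPU 0 is always visited, set in the mask or not. */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, flow->cpu_used_mask))
		handle_cpu(cpu);

	/* for_each_cpu() walks only the set bits, roughly equivalent to: */
	for (cpu = cpumask_first(flow->cpu_used_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, flow->cpu_used_mask))
		handle_cpu(cpu);

The simplification is safe as long as CPU 0 is already set in
flow->cpu_used_mask by the time the flow's default stats node is allocated.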
@@ -220,16 +219,13 @@ static struct mask_array *tbl_mask_array_alloc(int size)
 	struct mask_array *new;
 
 	size = max(MASK_ARRAY_SIZE_MIN, size);
-	new = kzalloc(sizeof(struct mask_array) +
-		      sizeof(struct sw_flow_mask *) * size +
+	new = kzalloc(struct_size(new, masks, size) +
 		      sizeof(u64) * size, GFP_KERNEL);
 	if (!new)
 		return NULL;
 
 	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
-					     sizeof(struct mask_array) +
-					     sizeof(struct sw_flow_mask *) *
-					     size);
+					     struct_size(new, masks, size));
 
 	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
 						sizeof(u64) * size,
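Note: struct_size(), from <linux/overflow.h>, computes the size of a structure
that ends in a flexible array member: struct_size(p, member, n) evaluates to
sizeof(*p) + n * sizeof(*p->member), saturating to SIZE_MAX on overflow so the
allocation fails instead of coming back undersized. A minimal sketch of the
pattern with a hypothetical struct (not the ovs mask_array definition):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Hypothetical struct ending in a flexible array member. */
	struct demo {
		unsigned int n;
		struct sw_flow_mask *masks[];
	};

	static struct demo *demo_alloc(int size)
	{
		/*
		 * sizeof() does not evaluate its operand, so 'new' may be
		 * referenced inside its own initializer here.
		 */
		struct demo *new = kzalloc(struct_size(new, masks, size),
					   GFP_KERNEL);

		if (new)
			new->n = size;
		return new;
	}

In the hunk above, the extra sizeof(u64) * size bytes for masks_usage_zero_cntr
are still added on top of the allocation, and the same struct_size() expression
is reused to compute the offset of that trailing area.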