author    Eelco Chaudron <echaudro@redhat.com>    2020-10-17 20:24:51 +0200
committer Jakub Kicinski <kuba@kernel.org>        2020-10-18 12:29:36 -0700
commit    f981fc3d515a588c389242b7e3a71487b40571a5 (patch)
tree      63f50c31fb06abe5ba190e3490ddec5c5cd59f2c /net/openvswitch/flow_table.h
parent    f355a55f8202811df304de42f59868f2c6810db1 (diff)
net: openvswitch: fix to make sure flow_lookup() is not preempted
The flow_lookup() function uses per-CPU variables, which must be accessed with BH disabled. This is fine in the general NAPI use case, where the local BH is already disabled, but flow_lookup() is also called from the netlink context. This patch makes sure the BH is disabled in the netlink path as well.

In addition, u64_stats_update_begin() requires that there be only one writer, which is not guaranteed here. Making the stats per CPU and disabling BH (softirq) ensures there is always exactly one writer.

Fixes: eac87c413bf9 ("net: openvswitch: reorder masks array based on usage")
Reported-by: Juri Lelli <jlelli@redhat.com>
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Link: https://lore.kernel.org/r/160295903253.7789.826736662555102345.stgit@ebuild
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
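A minimal sketch of the writer pattern this fix enforces, assuming the struct mask_array_stats layout introduced by the diff below; mask_usage_inc() is a hypothetical helper for illustration, not the actual openvswitch code:

#include <linux/bottom_half.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

static void mask_usage_inc(struct mask_array *ma, int index)
{
	struct mask_array_stats *stats;

	/* NAPI callers already run with BH disabled; the netlink path
	 * must disable it explicitly so the softirq writer cannot
	 * preempt this update. */
	local_bh_disable();

	stats = this_cpu_ptr(ma->masks_usage_stats);

	/* With per-CPU stats and BH off, this CPU is the only possible
	 * writer for its syncp, as u64_stats_update_begin() requires. */
	u64_stats_update_begin(&stats->syncp);
	stats->usage_cntrs[index]++;
	u64_stats_update_end(&stats->syncp);

	local_bh_enable();
}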
Diffstat (limited to 'net/openvswitch/flow_table.h')
 net/openvswitch/flow_table.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index d8fb7a3a3dfd..9e659db78c05 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -38,12 +38,16 @@ struct mask_count {
 	u64 counter;
 };
 
+struct mask_array_stats {
+	struct u64_stats_sync syncp;
+	u64 usage_cntrs[];
+};
+
 struct mask_array {
 	struct rcu_head rcu;
 	int count, max;
-	u64 __percpu *masks_usage_cntr;
+	struct mask_array_stats __percpu *masks_usage_stats;
 	u64 *masks_usage_zero_cntr;
-	struct u64_stats_sync syncp;
 	struct sw_flow_mask __rcu *masks[];
 };
 