author     Taehee Yoo <ap420073@gmail.com>            2018-11-05 03:43:19 +0900
committer  Pablo Neira Ayuso <pablo@netfilter.org>    2018-11-12 15:47:50 +0100
commit     fd3e71a9f71e232181a225301a75936373636ccc
tree       f79b57235cfb769655b02d421e4ce762cf2ce09d   /net/netfilter/nf_conncount.c
parent     ccda4af0f4b92f7b4c308d3acc262f4a7e3affad
netfilter: nf_conncount: use spin_lock_bh instead of spin_lock
conn_free() takes the lock with spin_lock() and is called by both
nf_conncount_lookup() and nf_conncount_gc_list(). nf_conncount_lookup()
is called from bottom-half context and nf_conncount_gc_list() from
process context, so the plain spin_lock() call is not safe. Hence
conn_free() should use spin_lock_bh() instead of spin_lock().

test commands:
   %nft add table ip filter
   %nft add chain ip filter input { type filter hook input priority 0\; }
   %nft add rule filter input meter test { ip saddr ct count over 2 } \
      counter

splat looks like:
[ 461.996507] ================================
[ 461.998999] WARNING: inconsistent lock state
[ 461.998999] 4.19.0-rc6+ #22 Not tainted
[ 461.998999] --------------------------------
[ 461.998999] inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
[ 461.998999] kworker/0:2/134 [HC0[0]:SC0[0]:HE1:SE1] takes:
[ 461.998999] 00000000a71a559a (&(&list->list_lock)->rlock){+.?.}, at: conn_free+0x69/0x2b0 [nf_conncount]
[ 461.998999] {IN-SOFTIRQ-W} state was registered at:
[ 461.998999]   _raw_spin_lock+0x30/0x70
[ 461.998999]   nf_conncount_add+0x28a/0x520 [nf_conncount]
[ 461.998999]   nft_connlimit_eval+0x401/0x580 [nft_connlimit]
[ 461.998999]   nft_dynset_eval+0x32b/0x590 [nf_tables]
[ 461.998999]   nft_do_chain+0x497/0x1430 [nf_tables]
[ 461.998999]   nft_do_chain_ipv4+0x255/0x330 [nf_tables]
[ 461.998999]   nf_hook_slow+0xb1/0x160
[ ... ]
[ 461.998999] other info that might help us debug this:
[ 461.998999]  Possible unsafe locking scenario:
[ 461.998999]
[ 461.998999]        CPU0
[ 461.998999]        ----
[ 461.998999]   lock(&(&list->list_lock)->rlock);
[ 461.998999]   <Interrupt>
[ 461.998999]     lock(&(&list->list_lock)->rlock);
[ 461.998999]
[ 461.998999]  *** DEADLOCK ***
[ 461.998999]
[ ... ]

Fixes: 5c789e131cbb ("netfilter: nf_conncount: Add list lock and gc worker, and RCU for init tree search")
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
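[Editor's note] The locking pattern the patch fixes can be shown in isolation. Below is a minimal, self-contained sketch, not the nf_conncount code itself; every demo_* name is hypothetical. A list is protected by a spinlock that is taken both from softirq context (a timer callback, standing in for the netfilter hook path that reaches nf_conncount_add()) and from process context (a work item, standing in for the gc worker that reaches conn_free()). The process-context side must take the lock with spin_lock_bh(): with a plain spin_lock() there, the timer could fire on the same CPU while the work item holds the lock and then spin on it forever, which is the scenario lockdep flags above.

/*
 * Minimal sketch, assuming nothing beyond core kernel APIs; all demo_*
 * names are made up.  demo_lock protects demo_list and is taken from
 * softirq context (timer callback) and from process context (work item).
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

struct demo_entry {
	struct list_head node;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);
static struct timer_list demo_timer;
static struct work_struct demo_work;

/* Softirq context, analogous to the hook path calling nf_conncount_add():
 * bottom halves are already disabled here, so this section cannot be
 * interrupted by another softirq user of demo_lock on this CPU. */
static void demo_timer_fn(struct timer_list *t)
{
	struct demo_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		spin_lock(&demo_lock);
		list_add_tail(&e->node, &demo_list);
		spin_unlock(&demo_lock);
	}
	mod_timer(&demo_timer, jiffies + HZ);
}

/* Process context, analogous to the gc worker reaching conn_free():
 * spin_lock_bh() keeps softirqs off this CPU while the lock is held, so
 * demo_timer_fn() cannot preempt us and spin on demo_lock.  A plain
 * spin_lock() here is the {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} inconsistency
 * lockdep complains about. */
static void demo_work_fn(struct work_struct *w)
{
	struct demo_entry *e, *tmp;

	spin_lock_bh(&demo_lock);
	list_for_each_entry_safe(e, tmp, &demo_list, node) {
		list_del(&e->node);
		kfree(e);
	}
	spin_unlock_bh(&demo_lock);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	timer_setup(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + HZ);
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
	cancel_work_sync(&demo_work);
	demo_work_fn(&demo_work);	/* free anything still queued */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Built as an out-of-tree module with CONFIG_PROVE_LOCKING enabled, replacing the spin_lock_bh()/spin_unlock_bh() pair in demo_work_fn() with plain spin_lock()/spin_unlock() should produce an "inconsistent lock state" warning of the same shape as the splat in the commit message.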
Diffstat (limited to 'net/netfilter/nf_conncount.c')
-rw-r--r--  net/netfilter/nf_conncount.c |  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df793f5..71b1f4f99580 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -106,15 +106,15 @@ nf_conncount_add(struct nf_conncount_list *list,
 	conn->zone = *zone;
 	conn->cpu = raw_smp_processor_id();
 	conn->jiffies32 = (u32)jiffies;
-	spin_lock(&list->list_lock);
+	spin_lock_bh(&list->list_lock);
 	if (list->dead == true) {
 		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock(&list->list_lock);
+		spin_unlock_bh(&list->list_lock);
 		return NF_CONNCOUNT_SKIP;
 	}
 	list_add_tail(&conn->node, &list->head);
 	list->count++;
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	return NF_CONNCOUNT_ADDED;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,10 +132,10 @@ static bool conn_free(struct nf_conncount_list *list,
 {
 	bool free_entry = false;
 
-	spin_lock(&list->list_lock);
+	spin_lock_bh(&list->list_lock);
 
 	if (list->count == 0) {
-		spin_unlock(&list->list_lock);
+		spin_unlock_bh(&list->list_lock);
 		return free_entry;
 	}
 
@@ -144,7 +144,7 @@ static bool conn_free(struct nf_conncount_list *list,
 	if (list->count == 0)
 		free_entry = true;
 
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	call_rcu(&conn->rcu_head, __conn_free);
 	return free_entry;
 }
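[Editor's note] On the choice of primitive: the splat shows the lock being taken from softirq context ({IN-SOFTIRQ-W}, registered via nf_hook_slow -> nft_do_chain -> nf_conncount_add) but never from hardirq context (HC0 in the lockdep state), so disabling bottom halves locally with spin_lock_bh() is sufficient and the heavier spin_lock_irqsave() is not required. The patch converts the list_lock takers in both nf_conncount_add() and conn_free() to the _bh variants; taking spin_lock_bh() in a context where bottom halves are already disabled is harmless, so converting both callers uniformly is presumably the simpler and safer option.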