author	Florian Westphal <fw@strlen.de>	2022-04-11 13:01:18 +0200
committer	Pablo Neira Ayuso <pablo@netfilter.org>	2022-05-13 18:52:16 +0200
commit	1397af5bfd7d32b0cf2adb70a78c9a9e8f11d912 (patch)
tree	11e63b8c7924ec751ed48e9dc73c2a7ca0b5245d /net/netfilter/nf_conntrack_netlink.c
parent	0d3cc504ba9cdcff76346306c37eb1ea01e60a86 (diff)
netfilter: conntrack: remove the percpu dying list
It's no longer needed. Entries that need event redelivery are placed on the new pernet dying list.

The advantage is that there is no need to take an additional spinlock on conntrack removal unless event redelivery failed or the conntrack entry was never added to the table in the first place (confirmed bit not set).

The IPS_CONFIRMED bit now needs to be set as soon as the entry has been unlinked from the unconfirmed list, else the destroy function may attempt to unlink it a second time.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
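For context, here is a minimal sketch of what the dying-list dump looks like after this change, based on the hunks below. The pernet ecache accessor (nf_conn_pernet_ecache()) and the dying_list/dying_lock field names are assumptions taken from the surrounding series, not part of this diff, and resume handling is simplified:

/* Sketch of the post-patch dying-list dump (illustrative only).
 * The pernet ecache accessor and field names are assumptions from
 * the surrounding series; resume-on-full-skb handling is simplified.
 */
static int
ctnetlink_dump_dying_sketch(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nf_conntrack_net_ecache *ecache_net;	/* assumed pernet ecache state */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct, *last = ctx->last;
	struct hlist_nulls_node *n;
	int res;

	if (ctx->done)			/* the single pernet list was already walked */
		return 0;

	ctx->last = NULL;

	ecache_net = nf_conn_pernet_ecache(sock_net(skb->sk));	/* assumed helper */
	spin_lock_bh(&ecache_net->dying_lock);
	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		res = ctnetlink_dump_one_entry(skb, cb, ct, true);
		if (res < 0) {		/* skb full: stop, resume on the next call */
			spin_unlock_bh(&ecache_net->dying_lock);
			nf_ct_put(last);
			return skb->len;
		}
	}
	spin_unlock_bh(&ecache_net->dying_lock);

	ctx->done = true;		/* no percpu dying lists left to visit */
	nf_ct_put(last);
	return skb->len;
}

Compared with the removed ctnetlink_dump_list(..., true) path, only one list and one lock are involved, which is why the retrans_done flag and the extra percpu walk disappear in the diff below.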
Diffstat (limited to 'net/netfilter/nf_conntrack_netlink.c')
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	23
1 file changed, 7 insertions(+), 16 deletions(-)
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index a4ec2aad2187..2e9c8183e4a2 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -62,7 +62,6 @@ struct ctnetlink_list_dump_ctx {
struct nf_conn *last;
unsigned int cpu;
bool done;
- bool retrans_done;
};
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
@@ -1751,13 +1750,12 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
}
static int
-ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
+ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- struct hlist_nulls_head *list;
struct net *net = sock_net(skb->sk);
int res, cpu;
@@ -1774,12 +1772,11 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
spin_lock_bh(&pcpu->lock);
- list = dying ? &pcpu->dying : &pcpu->unconfirmed;
restart:
- hlist_nulls_for_each_entry(h, n, list, hnnode) {
+ hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
- res = ctnetlink_dump_one_entry(skb, cb, ct, dying);
+ res = ctnetlink_dump_one_entry(skb, cb, ct, false);
if (res < 0) {
ctx->cpu = cpu;
spin_unlock_bh(&pcpu->lock);
@@ -1812,8 +1809,8 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_nulls_node *n;
#endif
- if (ctx->retrans_done)
- return ctnetlink_dump_list(skb, cb, true);
+ if (ctx->done)
+ return 0;
ctx->last = NULL;
@@ -1842,10 +1839,10 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
spin_unlock_bh(&ecache_net->dying_lock);
#endif
+ ctx->done = true;
nf_ct_put(last);
- ctx->retrans_done = true;
- return ctnetlink_dump_list(skb, cb, true);
+ return skb->len;
}
static int ctnetlink_get_ct_dying(struct sk_buff *skb,
@@ -1863,12 +1860,6 @@ static int ctnetlink_get_ct_dying(struct sk_buff *skb,
return -EOPNOTSUPP;
}
-static int
-ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return ctnetlink_dump_list(skb, cb, false);
-}
-
static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])