author	Florian Westphal <fw@strlen.de>	2022-03-01 00:46:19 +0100
committer	Florian Westphal <fw@strlen.de>	2022-03-01 11:51:15 +0100
commit	3b836da4081fa585cf6c392f62557496f2cb0efe (patch)
tree	f2dcb351b8b08fd020d00958cb10ac9c50c91804 /net
parent	c3873070247d9e3c7a6b0cf9bf9b45e8018427b1 (diff)
netfilter: nf_queue: handle socket prefetch
In case someone combines bpf socket assign and nf_queue, we will queue an skb that references a struct sock whose reference count has not been incremented.

As we leave rcu protection, there is no guarantee that skb->sk is still valid.

For the refcount-less skb->sk case, try to increment the reference count and then override the destructor.

In case of failure we have two choices: orphan the skb and 'delete' the preselected socket, or let nf_queue() drop the packet. Do the latter; it should not happen during normal operation.

Fixes: cf7fbe660f2d ("bpf: Add socket assign support")
Acked-by: Joe Stringer <joe@cilium.io>
Signed-off-by: Florian Westphal <fw@strlen.de>
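For context, the fix in the diff below keys off two existing helpers. The sketch that follows paraphrases their definitions as they look around this kernel version (include/linux/skbuff.h and include/net/sock.h); it is illustrative, not verbatim source.

/* Paraphrased sketch, not verbatim kernel source.
 *
 * bpf_sk_assign() marks an assigned skb by installing sock_pfree as the
 * skb destructor; for sockets flagged SOCK_RCU_FREE no reference is taken,
 * which is the case this patch has to handle.
 */
static inline bool skb_sk_is_prefetched(struct sk_buff *skb)
{
#ifdef CONFIG_INET
	return skb->destructor == sock_pfree;
#else
	return false;
#endif
}

/* True when the socket's lifetime is managed via sk_refcnt rather than
 * solely by RCU (non-full sockets, or full sockets without SOCK_RCU_FREE).
 */
static inline bool sk_is_refcounted(struct sock *sk)
{
	/* Only full sockets have sk->sk_flags. */
	return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}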
Diffstat (limited to 'net')
-rw-r--r--	net/netfilter/nf_queue.c | 12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index e39549c55945..63d1516816b1 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -180,6 +180,18 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		break;
 	}
 
+	if (skb_sk_is_prefetched(skb)) {
+		struct sock *sk = skb->sk;
+
+		if (!sk_is_refcounted(sk)) {
+			if (!refcount_inc_not_zero(&sk->sk_refcnt))
+				return -ENOTCONN;
+
+			/* drop refcount on skb_orphan */
+			skb->destructor = sock_edemux;
+		}
+	}
+
 	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
 	if (!entry)
 		return -ENOMEM;
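The overridden destructor matters once the queued skb is later orphaned or freed: skb_orphan() invokes skb->destructor, and sock_edemux() then drops the reference taken above via sock_gen_put(). A rough sketch of skb_orphan(), paraphrased from include/linux/skbuff.h:

/* Paraphrased sketch of skb_orphan(), not verbatim kernel source. */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);	/* here: sock_edemux() releases skb->sk */
		skb->destructor = NULL;
		skb->sk = NULL;
	} else {
		BUG_ON(skb->sk);
	}
}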