author    Pablo Neira Ayuso <pablo@netfilter.org>  2021-03-28 23:08:55 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>  2021-03-31 22:34:11 +0200
commit    8b9229d15877ec77775633f058d14145f6eb98fa (patch)
tree      fce382c7beb58215e6e6ad99bc8b36a8999fda87 /net/netfilter/nf_flow_table_ip.c
parent    5c701e71961af0ec8227ea615f1646dbe98aea1a (diff)
netfilter: flowtable: dst_check() from garbage collector path
Move dst_check() to the garbage collector path. Stale routes trigger the
flow entry teardown state which makes affected flows go back to the
classic forwarding path to re-evaluate flow offloading.

IPv6 requires the dst cookie to work, store it in the flow_tuple,
otherwise dst_check() always fails.

Fixes: e5075c0badaa ("netfilter: flowtable: call dst_check() to fall back to classic forwarding")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
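For context, a minimal sketch of the garbage-collector-side check this commit
describes. The actual change lives in nf_flow_table_core.c, not in the file
shown below; the helper name and exact layout here are assumptions based on
the commit message, not the verbatim patch:

	/*
	 * Sketch only: a per-tuple staleness check the garbage collector
	 * could run instead of the deleted per-packet branches below.
	 * flow_offload_stale_dst() is an illustrative name.
	 */
	static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
	{
		struct dst_entry *dst;

		if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
		    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
			dst = tuple->dst_cache;
			/* dst_cookie is the field this commit adds to the
			 * flow_tuple; without it, dst_check() on IPv6
			 * routes always fails.
			 */
			if (!dst_check(dst, tuple->dst_cookie))
				return true;
		}

		return false;
	}

On a stale route the collector would call flow_offload_teardown(), so packets
fall back to the classic forwarding path, exactly what the removed hook-path
branches below did per packet.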
Diffstat (limited to 'net/netfilter/nf_flow_table_ip.c')
-rw-r--r--  net/netfilter/nf_flow_table_ip.c  22
1 file changed, 4 insertions(+), 18 deletions(-)
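The IPv6 dst cookie mentioned above has to be captured when the route is
stored in the flow tuple, so the collector can later pass it to dst_check().
A sketch of what that could look like; rt6_get_cookie() is the existing IPv6
accessor, while the wrapper name is illustrative:

	/*
	 * Sketch, not the verbatim patch: record the dst cookie at route
	 * fill time. IPv4 routes do not use a cookie, hence 0.
	 */
	static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
	{
		if (flow_tuple->l3proto == NFPROTO_IPV6)
			return rt6_get_cookie((struct rt6_info *)flow_tuple->dst_cache);

		return 0;
	}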
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 12cb0cc6958c..889cf88d3dba 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -364,15 +364,6 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
return NF_ACCEPT;
- if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
- tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
- rt = (struct rtable *)tuplehash->tuple.dst_cache;
- if (!dst_check(&rt->dst, 0)) {
- flow_offload_teardown(flow);
- return NF_ACCEPT;
- }
- }
-
if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP;
@@ -391,6 +382,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ rt = (struct rtable *)tuplehash->tuple.dst_cache;
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
IPCB(skb)->flags = IPSKB_FORWARDED;
@@ -399,6 +391,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
+ rt = (struct rtable *)tuplehash->tuple.dst_cache;
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -607,15 +600,6 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
return NF_ACCEPT;
- if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
- tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
- if (!dst_check(&rt->dst, 0)) {
- flow_offload_teardown(flow);
- return NF_ACCEPT;
- }
- }
-
if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP;
@@ -633,6 +617,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
IP6CB(skb)->flags = IP6SKB_FORWARDED;
@@ -641,6 +626,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
+ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);