Diffstat (limited to 'net')
-rw-r--r--  net/bridge/netfilter/ebt_dnat.c          |   2
-rw-r--r--  net/bridge/netfilter/ebt_redirect.c      |   2
-rw-r--r--  net/bridge/netfilter/ebt_snat.c          |   2
-rw-r--r--  net/core/dev.c                           |   6
-rw-r--r--  net/core/filter.c                        | 161
-rw-r--r--  net/core/rtnetlink.c                     |  13
-rw-r--r--  net/core/sock.c                          |   2
-rw-r--r--  net/dsa/tag_ksz.c                        |   2
-rw-r--r--  net/ipv4/icmp.c                          |   7
-rw-r--r--  net/ipv4/nexthop.c                       |   2
-rw-r--r--  net/ipv4/tcp_input.c                     |   2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c  |   1
-rw-r--r--  net/mpls/mpls_iptunnel.c                 |   1
-rw-r--r--  net/mptcp/Kconfig                        |   6
-rw-r--r--  net/mptcp/options.c                      |   3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c     |  10
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c   |  19
-rw-r--r--  net/netfilter/nf_dup_netdev.c            |   1
-rw-r--r--  net/netfilter/nf_tables_api.c            |   6
-rw-r--r--  net/netfilter/nf_tables_offload.c        |   4
-rw-r--r--  net/netfilter/nft_fwd_netdev.c           |   1
-rw-r--r--  net/nfc/netlink.c                        |   2
-rw-r--r--  net/openvswitch/flow_table.c             |  58
-rw-r--r--  net/openvswitch/flow_table.h             |   8
-rw-r--r--  net/sched/act_ct.c                       |   4
-rw-r--r--  net/sched/act_tunnel_key.c               |   2
-rw-r--r--  net/sched/cls_api.c                      |   2
-rw-r--r--  net/tipc/bcast.c                         |  10
28 files changed, 212 insertions, 127 deletions
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 12a4f4d93681..3fda71a8579d 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -21,7 +21,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
- if (skb_ensure_writable(skb, ETH_ALEN))
+ if (skb_ensure_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 0cad62a4052b..307790562b49 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -21,7 +21,7 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_redirect_info *info = par->targinfo;
- if (skb_ensure_writable(skb, ETH_ALEN))
+ if (skb_ensure_writable(skb, 0))
return EBT_DROP;
if (xt_hooknum(par) != NF_BR_BROUTING)
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 27443bf229a3..7dfbcdfc30e5 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -22,7 +22,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
- if (skb_ensure_writable(skb, ETH_ALEN * 2))
+ if (skb_ensure_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
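The three ebtables hunks above are one fix: skb_ensure_writable() takes a length of packet data counted from skb->data, but at the bridge hooks the Ethernet header sits in front of skb->data, so demanding ETH_ALEN (or ETH_ALEN * 2) bytes of payload made these targets drop frames whose payload is shorter than that. Passing 0 still unshares the head so the MAC addresses can be rewritten. A minimal userspace model of the failing check (field names simplified; not the kernel implementation):

#include <stdio.h>

#define ETH_ALEN 6

struct fake_skb {
	unsigned int len;	/* payload bytes after the Ethernet header */
};

/* skb_ensure_writable(skb, n) first does pskb_may_pull(skb, n), which
 * fails when n > skb->len -- and skb->len here does not include the
 * 14-byte Ethernet header the target actually wants to rewrite.
 */
static int ensure_writable(const struct fake_skb *skb, unsigned int n)
{
	if (n > skb->len)
		return -1;		/* -> EBT_DROP */
	/* unclone/copy the head if shared ... */
	return 0;
}

int main(void)
{
	struct fake_skb tiny = { .len = 4 };	/* short padded frame */

	printf("need ETH_ALEN: %s\n",
	       ensure_writable(&tiny, ETH_ALEN) ? "dropped" : "ok");
	printf("need 0:        %s\n",
	       ensure_writable(&tiny, 0) ? "dropped" : "ok");
	return 0;
}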
diff --git a/net/core/dev.c b/net/core/dev.c
index 751e5264fd49..82dc6b48e45f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -145,6 +145,7 @@
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
+#include <linux/prandom.h>
#include "net-sysfs.h"
@@ -3558,6 +3559,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
dev_queue_xmit_nit(skb, dev);
len = skb->len;
+ PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
trace_net_dev_start_xmit(skb, dev);
rc = netdev_start_xmit(skb, dev, txq, more);
trace_net_dev_xmit(skb, rc, dev, len);
@@ -4130,6 +4132,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (!skb)
goto out;
+ PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -4195,6 +4198,7 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
skb_set_queue_mapping(skb, queue_id);
txq = skb_get_tx_queue(dev, skb);
+ PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
local_bh_disable();
@@ -10213,7 +10217,7 @@ void netdev_run_todo(void)
struct net_device *dev = list_first_entry(&unlink_list,
struct net_device,
unlink_list);
- list_del(&dev->unlink_list);
+ list_del_init(&dev->unlink_list);
dev->nested_level = dev->lower_level - 1;
}
#endif
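Two independent dev.c changes: PRANDOM_ADD_NOISE() feeds cheap tx-path entropy (pointer values and jiffies) into the prandom state, and netdev_run_todo() switches to list_del_init() because dev->unlink_list is reused; a plain list_del() leaves the entry poisoned, so a later list_empty() test or re-add on the same net_device misbehaves. A runnable sketch of that distinction with a minimal list (modeled on include/linux/list.h; poison values illustrative):

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void __list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_del(struct list_head *e)
{
	__list_del_entry(e);
	e->next = (void *)0x100;	/* LIST_POISON-style values */
	e->prev = (void *)0x122;
}

static void list_del_init(struct list_head *e)
{
	__list_del_entry(e);
	INIT_LIST_HEAD(e);		/* entry stays testable/reusable */
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head unlink_list, dev_entry;

	INIT_LIST_HEAD(&unlink_list);
	list_add(&dev_entry, &unlink_list);
	list_del_init(&dev_entry);

	/* With list_del() this would read poisoned pointers; with
	 * list_del_init() the entry can be checked and queued again.
	 */
	printf("entry unlinked: %s\n", list_empty(&dev_entry) ? "yes" : "no");
	return 0;
}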
diff --git a/net/core/filter.c b/net/core/filter.c
index c5e2a1c5fd8d..2ca5eecebacf 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2165,12 +2165,12 @@ static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
-static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb)
+static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
+ struct net_device *dev, struct bpf_nh_params *nh)
{
- struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst->dev;
u32 hh_len = LL_RESERVED_SPACE(dev);
const struct in6_addr *nexthop;
+ struct dst_entry *dst = NULL;
struct neighbour *neigh;
if (dev_xmit_recursion()) {
@@ -2196,8 +2196,13 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb)
}
rcu_read_lock_bh();
- nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
- &ipv6_hdr(skb)->daddr);
+ if (!nh) {
+ dst = skb_dst(skb);
+ nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
+ &ipv6_hdr(skb)->daddr);
+ } else {
+ nexthop = &nh->ipv6_nh;
+ }
neigh = ip_neigh_gw6(dev, nexthop);
if (likely(!IS_ERR(neigh))) {
int ret;
@@ -2210,36 +2215,43 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb)
return ret;
}
rcu_read_unlock_bh();
- IP6_INC_STATS(dev_net(dst->dev),
- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ if (dst)
+ IP6_INC_STATS(dev_net(dst->dev),
+ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
out_drop:
kfree_skb(skb);
return -ENETDOWN;
}
-static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
+static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
+ struct bpf_nh_params *nh)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct net *net = dev_net(dev);
int err, ret = NET_XMIT_DROP;
- struct dst_entry *dst;
- struct flowi6 fl6 = {
- .flowi6_flags = FLOWI_FLAG_ANYSRC,
- .flowi6_mark = skb->mark,
- .flowlabel = ip6_flowinfo(ip6h),
- .flowi6_oif = dev->ifindex,
- .flowi6_proto = ip6h->nexthdr,
- .daddr = ip6h->daddr,
- .saddr = ip6h->saddr,
- };
- dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
- if (IS_ERR(dst))
- goto out_drop;
+ if (!nh) {
+ struct dst_entry *dst;
+ struct flowi6 fl6 = {
+ .flowi6_flags = FLOWI_FLAG_ANYSRC,
+ .flowi6_mark = skb->mark,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_oif = dev->ifindex,
+ .flowi6_proto = ip6h->nexthdr,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ };
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
+ if (IS_ERR(dst))
+ goto out_drop;
- skb_dst_set(skb, dst);
+ skb_dst_set(skb, dst);
+ } else if (nh->nh_family != AF_INET6) {
+ goto out_drop;
+ }
- err = bpf_out_neigh_v6(net, skb);
+ err = bpf_out_neigh_v6(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
else
@@ -2252,7 +2264,8 @@ out_xmit:
return ret;
}
#else
-static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
+static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
+ struct bpf_nh_params *nh)
{
kfree_skb(skb);
return NET_XMIT_DROP;
@@ -2260,11 +2273,9 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
#endif /* CONFIG_IPV6 */
#if IS_ENABLED(CONFIG_INET)
-static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb)
+static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
+ struct net_device *dev, struct bpf_nh_params *nh)
{
- struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = container_of(dst, struct rtable, dst);
- struct net_device *dev = dst->dev;
u32 hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
bool is_v6gw = false;
@@ -2292,7 +2303,21 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb)
}
rcu_read_lock_bh();
- neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
+ if (!nh) {
+ struct dst_entry *dst = skb_dst(skb);
+ struct rtable *rt = container_of(dst, struct rtable, dst);
+
+ neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
+ } else if (nh->nh_family == AF_INET6) {
+ neigh = ip_neigh_gw6(dev, &nh->ipv6_nh);
+ is_v6gw = true;
+ } else if (nh->nh_family == AF_INET) {
+ neigh = ip_neigh_gw4(dev, nh->ipv4_nh);
+ } else {
+ rcu_read_unlock_bh();
+ goto out_drop;
+ }
+
if (likely(!IS_ERR(neigh))) {
int ret;
@@ -2309,33 +2334,37 @@ out_drop:
return -ENETDOWN;
}
-static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev)
+static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
+ struct bpf_nh_params *nh)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net *net = dev_net(dev);
int err, ret = NET_XMIT_DROP;
- struct rtable *rt;
- struct flowi4 fl4 = {
- .flowi4_flags = FLOWI_FLAG_ANYSRC,
- .flowi4_mark = skb->mark,
- .flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_oif = dev->ifindex,
- .flowi4_proto = ip4h->protocol,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
- };
- rt = ip_route_output_flow(net, &fl4, NULL);
- if (IS_ERR(rt))
- goto out_drop;
- if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
- ip_rt_put(rt);
- goto out_drop;
- }
+ if (!nh) {
+ struct flowi4 fl4 = {
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
+ .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_oif = dev->ifindex,
+ .flowi4_proto = ip4h->protocol,
+ .daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
+ };
+ struct rtable *rt;
+
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto out_drop;
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+ goto out_drop;
+ }
- skb_dst_set(skb, &rt->dst);
+ skb_dst_set(skb, &rt->dst);
+ }
- err = bpf_out_neigh_v4(net, skb);
+ err = bpf_out_neigh_v4(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
else
@@ -2348,14 +2377,16 @@ out_xmit:
return ret;
}
#else
-static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev)
+static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
+ struct bpf_nh_params *nh)
{
kfree_skb(skb);
return NET_XMIT_DROP;
}
#endif /* CONFIG_INET */
-static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev)
+static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
+ struct bpf_nh_params *nh)
{
struct ethhdr *ethh = eth_hdr(skb);
@@ -2370,9 +2401,9 @@ static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev)
skb_reset_network_header(skb);
if (skb->protocol == htons(ETH_P_IP))
- return __bpf_redirect_neigh_v4(skb, dev);
+ return __bpf_redirect_neigh_v4(skb, dev, nh);
else if (skb->protocol == htons(ETH_P_IPV6))
- return __bpf_redirect_neigh_v6(skb, dev);
+ return __bpf_redirect_neigh_v6(skb, dev, nh);
out:
kfree_skb(skb);
return -ENOTSUPP;
@@ -2382,7 +2413,8 @@ out:
enum {
BPF_F_NEIGH = (1ULL << 1),
BPF_F_PEER = (1ULL << 2),
-#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER)
+ BPF_F_NEXTHOP = (1ULL << 3),
+#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
};
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
@@ -2455,7 +2487,8 @@ int skb_do_redirect(struct sk_buff *skb)
return -EAGAIN;
}
return flags & BPF_F_NEIGH ?
- __bpf_redirect_neigh(skb, dev) :
+ __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ?
+ &ri->nh : NULL) :
__bpf_redirect(skb, dev, flags);
out_drop:
kfree_skb(skb);
@@ -2504,16 +2537,21 @@ static const struct bpf_func_proto bpf_redirect_peer_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_2(bpf_redirect_neigh, u32, ifindex, u64, flags)
+BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
+ int, plen, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- if (unlikely(flags))
+ if (unlikely((plen && plen < sizeof(*params)) || flags))
return TC_ACT_SHOT;
- ri->flags = BPF_F_NEIGH;
+ ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0);
ri->tgt_index = ifindex;
+ BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params));
+ if (plen)
+ memcpy(&ri->nh, params, sizeof(ri->nh));
+
return TC_ACT_REDIRECT;
}
@@ -2522,7 +2560,9 @@ static const struct bpf_func_proto bpf_redirect_neigh_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
- .arg2_type = ARG_ANYTHING,
+ .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
@@ -4693,7 +4733,8 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
cmpxchg(&sk->sk_pacing_status,
SK_PACING_NONE,
SK_PACING_NEEDED);
- sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
+ sk->sk_max_pacing_rate = (val == ~0U) ?
+ ~0UL : (unsigned int)val;
sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk->sk_max_pacing_rate);
break;
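The filter.c changes extend bpf_redirect_neigh() from (ifindex, flags) to (ifindex, params, plen, flags): a tc BPF program may now supply the L3 nexthop itself (BPF_F_NEXTHOP), skipping the FIB/dst lookup in __bpf_redirect_neigh_v4/v6(). A hedged caller sketch against the resulting UAPI; the ifindex and gateway address are made-up example values:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef AF_INET
#define AF_INET 2
#endif

SEC("tc")
int redirect_via_gw(struct __sk_buff *skb)
{
	struct bpf_redir_neigh nh = {
		.nh_family = AF_INET,
		.ipv4_nh   = bpf_htonl(0x0a000001),	/* 10.0.0.1, example */
	};

	/* plen == sizeof(nh): use the caller-supplied nexthop (sets
	 * BPF_F_NEXTHOP). Passing NULL/0 instead keeps the old
	 * behaviour of resolving via the skb's dst or a route lookup.
	 */
	return bpf_redirect_neigh(4 /* ifindex, example */, &nh,
				  sizeof(nh), 0);
}

char LICENSE[] SEC("license") = "GPL";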
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 68e0682450c6..7d7223691783 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3709,13 +3709,13 @@ static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}
-static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
- struct net_device *dev;
+ size_t min_ifinfo_dump_size = 0;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
- u16 min_ifinfo_dump_size = 0;
+ struct net_device *dev;
int hdrlen;
/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
@@ -3735,9 +3735,8 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
*/
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
- min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
- if_nlmsg_size(dev,
- ext_filter_mask));
+ min_ifinfo_dump_size = max(min_ifinfo_dump_size,
+ if_nlmsg_size(dev, ext_filter_mask));
}
rcu_read_unlock();
@@ -5494,7 +5493,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
- u16 min_dump_alloc = 0;
+ u32 min_dump_alloc = 0;
link = rtnl_get_link(family, type);
if (!link || !link->dumpit) {
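Both rtnetlink hunks widen the same quantity: with enough VFs, if_nlmsg_size() can exceed 65535 bytes, and max_t(u16, ...) silently truncated the per-dump allocation hint. A two-line demonstration of the truncation (the 70000-byte figure is illustrative):

#include <stdint.h>
#include <stdio.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	size_t nlmsg_size = 70000;	/* > 0xffff, e.g. many VFs */

	printf("u16 hint: %u\n", max_t(uint16_t, 0, nlmsg_size)); /* 4464 */
	printf("u32 hint: %u\n", max_t(uint32_t, 0, nlmsg_size)); /* 70000 */
	return 0;
}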
diff --git a/net/core/sock.c b/net/core/sock.c
index 4e8729357122..727ea1cc633c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1163,7 +1163,7 @@ set_sndbuf:
case SO_MAX_PACING_RATE:
{
- unsigned long ulval = (val == ~0U) ? ~0UL : val;
+ unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
if (sizeof(ulval) != sizeof(val) &&
optlen >= sizeof(ulval) &&
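This sock.c hunk and the matching one in _bpf_setsockopt() above fix the same sign-extension bug: SO_MAX_PACING_RATE arrives as a signed 32-bit val, and assigning a negative val straight to unsigned long sign-extends on 64-bit, turning e.g. a requested rate of 4294967294 into an effectively unlimited one. Casting through unsigned int zero-extends instead:

#include <stdio.h>

int main(void)
{
	int val = (int)4294967294u;		/* u32 rate just below ~0U */
	unsigned long bad  = val;		/* sign-extended */
	unsigned long good = (unsigned int)val;	/* zero-extended, the fix */

	printf("bad : %#lx\n", bad);	/* 0xfffffffffffffffe on 64-bit */
	printf("good: %#lx\n", good);	/* 0xfffffffe */
	return 0;
}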
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 945a9bd5ba35..0a5aa982c60d 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -123,6 +123,7 @@ static const struct dsa_device_ops ksz8795_netdev_ops = {
.xmit = ksz8795_xmit,
.rcv = ksz8795_rcv,
.overhead = KSZ_INGRESS_TAG_LEN,
+ .tail_tag = true,
};
DSA_TAG_DRIVER(ksz8795_netdev_ops);
@@ -199,6 +200,7 @@ static const struct dsa_device_ops ksz9477_netdev_ops = {
.xmit = ksz9477_xmit,
.rcv = ksz9477_rcv,
.overhead = KSZ9477_INGRESS_TAG_LEN,
+ .tail_tag = true,
};
DSA_TAG_DRIVER(ksz9477_netdev_ops);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 07f67ced962a..005faea415a4 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -239,7 +239,7 @@ static struct {
/**
* icmp_global_allow - Are we allowed to send one more ICMP message ?
*
- * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
+ * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
* Returns false if we reached the limit and can not send another packet.
* Note: called with BH disabled
*/
@@ -267,7 +267,10 @@ bool icmp_global_allow(void)
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
- credit--;
+ /* We want to use a credit of one on average, but need to randomize
+ * it for security reasons.
+ */
+ credit = max_t(int, credit - prandom_u32_max(3), 0);
rc = true;
}
WRITE_ONCE(icmp_global.credit, credit);
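Before this change every allowed ICMP message consumed exactly one credit, so an off-path observer could count emitted packets to learn the limiter's precise state. Consuming a uniformly random 0..2 credits keeps the long-run rate near sysctl_icmp_msgs_per_sec while blurring that side channel. A userspace sketch of the consumption step (rand() stands in for prandom_u32_max(3); the burst value 50 is the sysctl default):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int credit = 50;	/* icmp_global.credit analogue */

static bool icmp_allow(void)
{
	unsigned int spend;

	if (!credit)
		return false;
	spend = rand() % 3;	/* spend 0, 1 or 2 credits -- one on average */
	credit = spend > credit ? 0 : credit - spend;
	return true;
}

int main(void)
{
	int sent = 0;

	for (int i = 0; i < 200; i++)
		sent += icmp_allow();
	printf("allowed %d of 200, credit left %u\n", sent, credit);
	return 0;
}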
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 8c0f17c6863c..0dc43ad28eb9 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -845,7 +845,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
remove_nh_grp_entry(net, nhge, nlinfo);
/* make sure all see the newly published array before releasing rtnl */
- synchronize_rcu();
+ synchronize_net();
}
static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67f10d3ec240..fc445833b5e5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5827,6 +5827,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
tcp_data_snd_check(sk);
if (!inet_csk_ack_scheduled(sk))
goto no_ack;
+ } else {
+ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
}
__tcp_ack_snd_check(sk, 0);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index fed9666a2f7d..054d287eb13d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -355,6 +355,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
ipv6_hdr(skb)->payload_len = htons(payload_len);
ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
if (skb->ip_summed == CHECKSUM_COMPLETE)
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 2def85718d94..ef59e25dc482 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -300,5 +300,6 @@ static void __exit mpls_iptunnel_exit(void)
module_exit(mpls_iptunnel_exit);
MODULE_ALIAS_RTNL_LWT(MPLS);
+MODULE_SOFTDEP("post: mpls_gso");
MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
MODULE_LICENSE("GPL v2");
diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
index 698bc3525160..a014149aa323 100644
--- a/net/mptcp/Kconfig
+++ b/net/mptcp/Kconfig
@@ -19,14 +19,11 @@ config INET_MPTCP_DIAG
config MPTCP_IPV6
bool "MPTCP: IPv6 support for Multipath TCP"
- select IPV6
+ depends on IPV6=y
default y
-endif
-
config MPTCP_KUNIT_TESTS
tristate "This builds the MPTCP KUnit tests" if !KUNIT_ALL_TESTS
- select MPTCP
depends on KUNIT
default KUNIT_ALL_TESTS
help
@@ -39,3 +36,4 @@ config MPTCP_KUNIT_TESTS
If unsure, say N.
+endif
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 092a2d48bfd3..a044dd43411d 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -241,7 +241,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
}
mp_opt->add_addr = 1;
- mp_opt->port = 0;
mp_opt->addr_id = *ptr++;
pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
@@ -297,6 +296,8 @@ void mptcp_get_options(const struct sk_buff *skb,
mp_opt->mp_capable = 0;
mp_opt->mp_join = 0;
mp_opt->add_addr = 0;
+ mp_opt->ahmac = 0;
+ mp_opt->port = 0;
mp_opt->rm_addr = 0;
mp_opt->dss = 0;
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index dc2e7da2742a..7da51390cea6 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -539,8 +539,8 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
if (new_state != cp->state) {
struct ip_vs_dest *dest = cp->dest;
- IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
- "%s:%d state: %s->%s conn->refcnt:%d\n",
+ IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] c:%s:%d v:%s:%d "
+ "d:%s:%d state: %s->%s conn->refcnt:%d\n",
pd->pp->name,
((state_off == TCP_DIR_OUTPUT) ?
"output " : "input "),
@@ -548,10 +548,12 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
th->fin ? 'F' : '.',
th->ack ? 'A' : '.',
th->rst ? 'R' : '.',
- IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
- ntohs(cp->dport),
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+ ntohs(cp->vport),
+ IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
+ ntohs(cp->dport),
tcp_state_name(cp->state),
tcp_state_name(new_state),
refcount_read(&cp->refcnt));
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index e8c86ee4c1c4..c8fb2187ad4b 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -541,13 +541,20 @@ static bool tcp_in_window(const struct nf_conn *ct,
swin = win << sender->td_scale;
sender->td_maxwin = (swin == 0 ? 1 : swin);
sender->td_maxend = end + sender->td_maxwin;
- /*
- * We haven't seen traffic in the other direction yet
- * but we have to tweak window tracking to pass III
- * and IV until that happens.
- */
- if (receiver->td_maxwin == 0)
+ if (receiver->td_maxwin == 0) {
+ /* We haven't seen traffic in the other
+ * direction yet but we have to tweak window
+ * tracking to pass III and IV until that
+ * happens.
+ */
receiver->td_end = receiver->td_maxend = sack;
+ } else if (sack == receiver->td_end + 1) {
+ /* Likely a reply to a keepalive.
+ * Needed for III.
+ */
+ receiver->td_end++;
+ }
+
}
} else if (((state->state == TCP_CONNTRACK_SYN_SENT
&& dir == IP_CT_DIR_ORIGINAL)
diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
index 2b01a151eaa8..a579e59ee5c5 100644
--- a/net/netfilter/nf_dup_netdev.c
+++ b/net/netfilter/nf_dup_netdev.c
@@ -19,6 +19,7 @@ static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
skb_push(skb, skb->mac_len);
skb->dev = dev;
+ skb->tstamp = 0;
dev_queue_xmit(skb);
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9957e0ed8658..65cb8e3c13d9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -302,7 +302,7 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
struct nft_expr *expr;
expr = nft_expr_first(rule);
- while (expr != nft_expr_last(rule) && expr->ops) {
+ while (nft_expr_more(rule, expr)) {
if (expr->ops->activate)
expr->ops->activate(ctx, expr);
@@ -317,7 +317,7 @@ static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
struct nft_expr *expr;
expr = nft_expr_first(rule);
- while (expr != nft_expr_last(rule) && expr->ops) {
+ while (nft_expr_more(rule, expr)) {
if (expr->ops->deactivate)
expr->ops->deactivate(ctx, expr, phase);
@@ -3080,7 +3080,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
* is called on error from nf_tables_newrule().
*/
expr = nft_expr_first(rule);
- while (expr != nft_expr_last(rule) && expr->ops) {
+ while (nft_expr_more(rule, expr)) {
next = nft_expr_next(expr);
nf_tables_expr_destroy(ctx, expr);
expr = next;
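All three loops (and the two in nf_tables_offload.c below) now share one predicate. The helper itself lands in include/net/netfilter/nf_tables.h, outside the net/-only diffstat shown here; it presumably wraps the old open-coded test, with the bound checked before ->ops is dereferenced -- the order the offload loops below previously got backwards:

/* Expected shape of the helper (assumption: lives outside this
 * diffstat, in include/net/netfilter/nf_tables.h): test the bound
 * first so ->ops is never read on the one-past-the-end expression.
 */
static inline bool nft_expr_more(const struct nft_rule *rule,
				 const struct nft_expr *expr)
{
	return expr != nft_expr_last(rule) && expr->ops;
}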
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 7c7e06624dc3..9f625724a20f 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -37,7 +37,7 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
struct nft_expr *expr;
expr = nft_expr_first(rule);
- while (expr->ops && expr != nft_expr_last(rule)) {
+ while (nft_expr_more(rule, expr)) {
if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
num_actions++;
@@ -61,7 +61,7 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
ctx->net = net;
ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
- while (expr->ops && expr != nft_expr_last(rule)) {
+ while (nft_expr_more(rule, expr)) {
if (!expr->ops->offload) {
err = -EOPNOTSUPP;
goto err_out;
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 3087e23297db..b77985986b24 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -138,6 +138,7 @@ static void nft_fwd_neigh_eval(const struct nft_expr *expr,
return;
skb->dev = dev;
+ skb->tstamp = 0;
neigh_xmit(neigh_table, dev, addr, skb);
out:
regs->verdict.code = verdict;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index e894254c17d4..8709f3d4e7c4 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1217,7 +1217,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
u32 idx;
char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 87c286ad660e..f3486a37361a 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -175,7 +175,7 @@ static struct table_instance *table_instance_alloc(int new_size)
static void __mask_array_destroy(struct mask_array *ma)
{
- free_percpu(ma->masks_usage_cntr);
+ free_percpu(ma->masks_usage_stats);
kfree(ma);
}
@@ -199,15 +199,15 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
ma->masks_usage_zero_cntr[i] = 0;
for_each_possible_cpu(cpu) {
- u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
- cpu);
+ struct mask_array_stats *stats;
unsigned int start;
u64 counter;
+ stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&ma->syncp);
- counter = usage_counters[i];
- } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ counter = stats->usage_cntrs[i];
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
ma->masks_usage_zero_cntr[i] += counter;
}
@@ -230,9 +230,10 @@ static struct mask_array *tbl_mask_array_alloc(int size)
sizeof(struct sw_flow_mask *) *
size);
- new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
- __alignof__(u64));
- if (!new->masks_usage_cntr) {
+ new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
+ sizeof(u64) * size,
+ __alignof__(u64));
+ if (!new->masks_usage_stats) {
kfree(new);
return NULL;
}
@@ -722,6 +723,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
/* Flow lookup does full lookup on flow table. It starts with
* mask from index passed in *index.
+ * This function MUST be called with BH disabled due to the use
+ * of CPU specific variables.
*/
static struct sw_flow *flow_lookup(struct flow_table *tbl,
struct table_instance *ti,
@@ -731,7 +734,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
u32 *n_cache_hit,
u32 *index)
{
- u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
+ struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
struct sw_flow *flow;
struct sw_flow_mask *mask;
int i;
@@ -741,9 +744,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
if (mask) {
flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
if (flow) {
- u64_stats_update_begin(&ma->syncp);
- usage_counters[*index]++;
- u64_stats_update_end(&ma->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->usage_cntrs[*index]++;
+ u64_stats_update_end(&stats->syncp);
(*n_cache_hit)++;
return flow;
}
@@ -762,9 +765,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
if (flow) { /* Found */
*index = i;
- u64_stats_update_begin(&ma->syncp);
- usage_counters[*index]++;
- u64_stats_update_end(&ma->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ stats->usage_cntrs[*index]++;
+ u64_stats_update_end(&stats->syncp);
return flow;
}
}
@@ -850,9 +853,17 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
u32 __always_unused n_mask_hit;
u32 __always_unused n_cache_hit;
+ struct sw_flow *flow;
u32 index = 0;
- return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
+ /* This function gets called through the netlink interface and therefore
+ * is preemptible. However, the flow_lookup() function needs to be called
+ * with BH disabled due to CPU specific variables.
+ */
+ local_bh_disable();
+ flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
+ local_bh_enable();
+ return flow;
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
@@ -1109,7 +1120,6 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
for (i = 0; i < ma->max; i++) {
struct sw_flow_mask *mask;
- unsigned int start;
int cpu;
mask = rcu_dereference_ovsl(ma->masks[i]);
@@ -1120,14 +1130,16 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
masks_and_count[i].counter = 0;
for_each_possible_cpu(cpu) {
- u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
- cpu);
+ struct mask_array_stats *stats;
+ unsigned int start;
u64 counter;
+ stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&ma->syncp);
- counter = usage_counters[i];
- } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ counter = stats->usage_cntrs[i];
+ } while (u64_stats_fetch_retry_irq(&stats->syncp,
+ start));
masks_and_count[i].counter += counter;
}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index d8fb7a3a3dfd..9e659db78c05 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -38,12 +38,16 @@ struct mask_count {
u64 counter;
};
+struct mask_array_stats {
+ struct u64_stats_sync syncp;
+ u64 usage_cntrs[];
+};
+
struct mask_array {
struct rcu_head rcu;
int count, max;
- u64 __percpu *masks_usage_cntr;
+ struct mask_array_stats __percpu *masks_usage_stats;
u64 *masks_usage_zero_cntr;
- struct u64_stats_sync syncp;
struct sw_flow_mask __rcu *masks[];
};
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 9c79fb92c2da..aba3cd85f284 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -156,11 +156,11 @@ tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
__be16 target_dst = target.dst.u.udp.port;
if (target_src != tuple->src.u.udp.port)
- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
offsetof(struct udphdr, source),
0xFFFF, be16_to_cpu(target_src));
if (target_dst != tuple->dst.u.udp.port)
- tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
offsetof(struct udphdr, dest),
0xFFFF, be16_to_cpu(target_dst));
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index a229751ee8c4..85c0d0d5b9da 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -459,7 +459,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
0, flags,
- key_id, 0);
+ key_id, opts_len);
} else {
NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
ret = -EINVAL;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 41a55c6cbeb8..faeabff283a2 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3712,7 +3712,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->gate.num_entries = tcf_gate_num_entries(act);
err = tcf_gate_get_entries(entry, act);
if (err)
- goto err_out;
+ goto err_out_locked;
} else {
err = -EOPNOTSUPP;
goto err_out_locked;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 940d176e0e87..d4beca895992 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -108,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
{
struct tipc_bc_base *bb = tipc_bc_base(net);
int all_dests = tipc_link_bc_peers(bb->link);
+ int max_win = tipc_link_max_win(bb->link);
+ int min_win = tipc_link_min_win(bb->link);
int i, mtu, prim;
bb->primary_bearer = INVALID_BEARER_ID;
@@ -121,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
continue;
mtu = tipc_bearer_mtu(net, i);
- if (mtu < tipc_link_mtu(bb->link))
+ if (mtu < tipc_link_mtu(bb->link)) {
tipc_link_set_mtu(bb->link, mtu);
+ tipc_link_set_queue_limits(bb->link,
+ min_win,
+ max_win);
+ }
bb->bcast_support &= tipc_bearer_bcast_support(net, i);
if (bb->dests[i] < all_dests)
continue;
@@ -585,7 +591,7 @@ static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
if (max_win > TIPC_MAX_LINK_WIN)
return -EINVAL;
tipc_bcast_lock(net);
- tipc_link_set_queue_limits(l, BCLINK_WIN_MIN, max_win);
+ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
tipc_bcast_unlock(net);
return 0;
}