Diffstat (limited to 'net')
-rw-r--r--  net/ceph/osdmap.c                     | 35
-rw-r--r--  net/compat.c                          |  7
-rw-r--r--  net/core/dev.c                        |  9
-rw-r--r--  net/core/ethtool.c                    |  2
-rw-r--r--  net/core/filter.c                     | 24
-rw-r--r--  net/ipv4/inet_connection_sock.c       |  6
-rw-r--r--  net/ipv4/tcp_output.c                 | 43
-rw-r--r--  net/ipv6/addrconf.c                   | 32
-rw-r--r--  net/ipv6/ip6_gre.c                    | 21
-rw-r--r--  net/ipv6/ip6_tunnel.c                 |  3
-rw-r--r--  net/ipv6/udp.c                        |  1
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h | 14
-rw-r--r--  net/netfilter/nf_nat_core.c           | 12
-rw-r--r--  net/packet/af_packet.c                | 16
-rw-r--r--  net/sched/cls_flower.c                | 15
-rw-r--r--  net/sched/cls_matchall.c              |  1
-rw-r--r--  net/sched/sch_generic.c               |  1
-rw-r--r--  net/sched/sch_hfsc.c                  | 23
-rw-r--r--  net/smc/af_smc.c                      | 16
-rw-r--r--  net/smc/smc.h                         |  2
-rw-r--r--  net/smc/smc_clc.c                     | 10
-rw-r--r--  net/smc/smc_clc.h                     |  3
-rw-r--r--  net/smc/smc_close.c                   | 27
-rw-r--r--  net/smc/smc_core.c                    | 16
-rw-r--r--  net/smc/smc_ib.c                      |  1
-rw-r--r--  net/smc/smc_pnet.c                    |  4
-rw-r--r--  net/smc/smc_rx.c                      |  2
-rw-r--r--  net/smc/smc_tx.c                      | 12
-rw-r--r--  net/smc/smc_wr.c                      |  2
-rw-r--r--  net/wireless/nl80211.c                |  6
30 files changed, 231 insertions, 135 deletions
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index f358d0bfa76b..79d14d70b7ea 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -2445,19 +2445,34 @@ static void apply_upmap(struct ceph_osdmap *osdmap,
pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
if (pg) {
- for (i = 0; i < raw->size; i++) {
- for (j = 0; j < pg->pg_upmap_items.len; j++) {
- int from = pg->pg_upmap_items.from_to[j][0];
- int to = pg->pg_upmap_items.from_to[j][1];
-
- if (from == raw->osds[i]) {
- if (!(to != CRUSH_ITEM_NONE &&
- to < osdmap->max_osd &&
- osdmap->osd_weight[to] == 0))
- raw->osds[i] = to;
+ /*
+ * Note: this approach does not allow a bidirectional swap,
+ * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
+ */
+ for (i = 0; i < pg->pg_upmap_items.len; i++) {
+ int from = pg->pg_upmap_items.from_to[i][0];
+ int to = pg->pg_upmap_items.from_to[i][1];
+ int pos = -1;
+ bool exists = false;
+
+ /* make sure replacement doesn't already appear */
+ for (j = 0; j < raw->size; j++) {
+ int osd = raw->osds[j];
+
+ if (osd == to) {
+ exists = true;
break;
}
+ /* ignore mapping if target is marked out */
+ if (osd == from && pos < 0 &&
+ !(to != CRUSH_ITEM_NONE &&
+ to < osdmap->max_osd &&
+ osdmap->osd_weight[to] == 0)) {
+ pos = j;
+ }
}
+ if (!exists && pos >= 0)
+ raw->osds[pos] = to;
}
}
}
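
The rewritten apply_upmap() walks the upmap pairs in the outer loop, so each pair is applied at most once and a replacement that already appears in the set is skipped. A minimal userspace sketch of the same two-pass logic, assuming plain int arrays stand in for the OSD set and a hypothetical target_marked_out() folds the CRUSH_ITEM_NONE / max_osd / weight test from the hunk:

#include <stdbool.h>
#include <stdio.h>

#define CRUSH_ITEM_NONE 0x7fffffff

/* the same "target is a valid OSD but marked out" test as in the hunk */
static bool target_marked_out(int to, const int *weight, int max_osd)
{
	return to != CRUSH_ITEM_NONE && to < max_osd && weight[to] == 0;
}

static void apply_upmap_items(int *osds, int size, int (*from_to)[2],
			      int len, const int *weight, int max_osd)
{
	for (int i = 0; i < len; i++) {
		int from = from_to[i][0], to = from_to[i][1];
		int pos = -1;
		bool exists = false;

		for (int j = 0; j < size; j++) {
			if (osds[j] == to) {
				exists = true;	/* replacement already there */
				break;
			}
			if (osds[j] == from && pos < 0 &&
			    !target_marked_out(to, weight, max_osd))
				pos = j;
		}
		if (!exists && pos >= 0)
			osds[pos] = to;
	}
}

int main(void)
{
	int osds[] = { 0, 1, 2 };
	int weight[] = { 1, 1, 1, 1 };
	int ft[1][2] = { { 1, 3 } };

	apply_upmap_items(osds, 3, ft, 1, weight, 4);
	printf("%d %d %d\n", osds[0], osds[1], osds[2]);	/* 0 3 2 */
	return 0;
}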
diff --git a/net/compat.c b/net/compat.c
index 6ded6c821d7a..22381719718c 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -185,6 +185,13 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
}
+ /*
+ * check that the length of the messages copied in matches what
+ * we computed in the first pass over the user control buffer
+ */
+ if ((char *)kcmsg - (char *)kcmsg_base != kcmlen)
+ goto Einval;
+
/* Ok, looks like we made it. Hook it up and return success. */
kmsg->msg_control = kcmsg_base;
kmsg->msg_controllen = kcmlen;
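
The added check guards a two-pass pattern over user memory: the first loop sizes kcmlen from the user's cmsg headers, the second copies them, and a concurrent writer can change the lengths in between. A hedged userspace sketch of where the consistency check sits; here records stand in for cmsg headers, and the drift the kernel defends against comes from re-reading user memory, which the sketch can only indicate in comments:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct rec { size_t len; const char *data; };

/* two passes over a source that could change in between (as user
 * memory can): size first, fill second, then verify no drift */
static int copy_records(char *dst, size_t cap, const struct rec *r, int n)
{
	size_t total = 0;
	char *p = dst;

	for (int i = 0; i < n; i++)		/* pass 1: compute length */
		total += r[i].len;
	if (total > cap)
		return -ENOBUFS;

	for (int i = 0; i < n; i++) {		/* pass 2: copy payloads */
		memcpy(p, r[i].data, r[i].len);
		p += r[i].len;
	}

	if ((size_t)(p - dst) != total)		/* the added consistency check */
		return -EINVAL;
	return 0;
}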
diff --git a/net/core/dev.c b/net/core/dev.c
index 37d6a3c59e69..97abddd9039a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1948,8 +1948,12 @@ again:
goto again;
}
out_unlock:
- if (pt_prev)
- pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
+ if (pt_prev) {
+ if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
+ pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
+ else
+ kfree_skb(skb2);
+ }
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
@@ -3892,6 +3896,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
__skb_pull(skb, off);
else if (off < 0)
__skb_push(skb, -off);
+ skb->mac_header += off;
switch (act) {
case XDP_REDIRECT:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6a582ae4c5d9..3228411ada0f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -525,6 +525,8 @@ convert_link_ksettings_to_legacy_settings(
= link_ksettings->base.eth_tp_mdix;
legacy_settings->eth_tp_mdix_ctrl
= link_ksettings->base.eth_tp_mdix_ctrl;
+ legacy_settings->transceiver
+ = link_ksettings->base.transceiver;
return retval;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 24dd33dd9f04..82edad58d066 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1794,7 +1794,7 @@ struct redirect_info {
u32 flags;
struct bpf_map *map;
struct bpf_map *map_to_flush;
- const struct bpf_prog *map_owner;
+ unsigned long map_owner;
};
static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@ -2500,11 +2500,17 @@ void xdp_do_flush_map(void)
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);
+static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
+ unsigned long aux)
+{
+ return (unsigned long)xdp_prog->aux != aux;
+}
+
static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
- const struct bpf_prog *map_owner = ri->map_owner;
+ unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map;
struct net_device *fwd = NULL;
u32 index = ri->ifindex;
@@ -2512,9 +2518,9 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
ri->ifindex = 0;
ri->map = NULL;
- ri->map_owner = NULL;
+ ri->map_owner = 0;
- if (unlikely(map_owner != xdp_prog)) {
+ if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
err = -EFAULT;
map = NULL;
goto err;
@@ -2574,7 +2580,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
- const struct bpf_prog *map_owner = ri->map_owner;
+ unsigned long map_owner = ri->map_owner;
struct bpf_map *map = ri->map;
struct net_device *fwd = NULL;
u32 index = ri->ifindex;
@@ -2583,10 +2589,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
ri->ifindex = 0;
ri->map = NULL;
- ri->map_owner = NULL;
+ ri->map_owner = 0;
if (map) {
- if (unlikely(map_owner != xdp_prog)) {
+ if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
err = -EFAULT;
map = NULL;
goto err;
@@ -2632,7 +2638,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
ri->ifindex = ifindex;
ri->flags = flags;
ri->map = NULL;
- ri->map_owner = NULL;
+ ri->map_owner = 0;
return XDP_REDIRECT;
}
@@ -2646,7 +2652,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
};
BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
- const struct bpf_prog *, map_owner)
+ unsigned long, map_owner)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
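
Why the type change: the stored owner is only ever compared, never dereferenced; by the time the redirect runs, a stale program could already be freed, so keeping a const struct bpf_prog * invites misuse. A reduced sketch of the identity-cookie idea, with hypothetical prog/prog_aux stand-ins for the BPF types:

#include <stdbool.h>
#include <stdio.h>

struct prog_aux { int id; };
struct prog { struct prog_aux *aux; };

struct redirect_info {
	unsigned long map_owner;	/* opaque cookie, never dereferenced */
};

static void record_owner(struct redirect_info *ri, const struct prog *p)
{
	ri->map_owner = (unsigned long)p->aux;
}

/* mirrors xdp_map_invalid(): pure identity comparison */
static bool map_invalid(const struct prog *p, unsigned long owner)
{
	return (unsigned long)p->aux != owner;
}

int main(void)
{
	struct prog_aux a1 = { 1 }, a2 = { 2 };
	struct prog p1 = { &a1 }, p2 = { &a2 };
	struct redirect_info ri = { 0 };

	record_owner(&ri, &p1);
	printf("%d %d\n", map_invalid(&p1, ri.map_owner),
	       map_invalid(&p2, ri.map_owner));	/* prints: 0 1 */
	return 0;
}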
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 9707372b78ed..8a91ebbf0c01 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -266,7 +266,7 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
#if IS_ENABLED(CONFIG_IPV6)
if (tb->fast_sk_family == AF_INET6)
return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
- &sk->sk_v6_rcv_saddr,
+ inet6_rcv_saddr(sk),
tb->fast_rcv_saddr,
sk->sk_rcv_saddr,
tb->fast_ipv6_only,
@@ -321,13 +321,14 @@ tb_found:
goto fail_unlock;
}
success:
- if (!hlist_empty(&tb->owners)) {
+ if (hlist_empty(&tb->owners)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
tb->fastreuseport = FASTREUSEPORT_ANY;
tb->fastuid = uid;
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
+ tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
@@ -354,6 +355,7 @@ success:
tb->fastuid = uid;
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
+ tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1c839c99114c..0bc9e46a5369 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1806,40 +1806,6 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
return !after(end_seq, tcp_wnd_end(tp));
}
-/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
- * should be put on the wire right now. If so, it returns the number of
- * packets allowed by the congestion window.
- */
-static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
- unsigned int cur_mss, int nonagle)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- unsigned int cwnd_quota;
-
- tcp_init_tso_segs(skb, cur_mss);
-
- if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
- return 0;
-
- cwnd_quota = tcp_cwnd_test(tp, skb);
- if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
- cwnd_quota = 0;
-
- return cwnd_quota;
-}
-
-/* Test if sending is allowed right now. */
-bool tcp_may_send_now(struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = tcp_send_head(sk);
-
- return skb &&
- tcp_snd_test(sk, skb, tcp_current_mss(sk),
- (tcp_skb_is_last(sk, skb) ?
- tp->nonagle : TCP_NAGLE_PUSH));
-}
-
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
* which is put after SKB on the list. It is very much like
* tcp_fragment() except that it may make several kinds of assumptions
@@ -3423,6 +3389,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
goto done;
}
+ /* data was not sent, this is our new send_head */
+ sk->sk_send_head = syn_data;
+ tp->packets_out -= tcp_skb_pcount(syn_data);
+
fallback:
/* Send a regular SYN with Fast Open cookie request option */
if (fo->cookie.len > 0)
@@ -3475,6 +3445,11 @@ int tcp_connect(struct sock *sk)
*/
tp->snd_nxt = tp->write_seq;
tp->pushed_seq = tp->write_seq;
+ buff = tcp_send_head(sk);
+ if (unlikely(buff)) {
+ tp->snd_nxt = TCP_SKB_CB(buff)->seq;
+ tp->pushed_seq = TCP_SKB_CB(buff)->seq;
+ }
TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
/* Timer for repeating the SYN until an answer. */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c2e2a78787ec..96861c702c06 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1399,10 +1399,18 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}
-static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
+static bool ipv6_use_optimistic_addr(struct net *net,
+ struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
+ if (!idev)
+ return false;
+ if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ return false;
+ if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
+ return false;
+
+ return true;
#else
return false;
#endif
@@ -1472,7 +1480,7 @@ static int ipv6_get_saddr_eval(struct net *net,
/* Rule 3: Avoid deprecated and optimistic addresses */
u8 avoid = IFA_F_DEPRECATED;
- if (!ipv6_use_optimistic_addr(score->ifa->idev))
+ if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
avoid |= IFA_F_OPTIMISTIC;
ret = ipv6_saddr_preferred(score->addr_type) ||
!(score->ifa->flags & avoid);
@@ -2460,7 +2468,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
int max_addresses = in6_dev->cnf.max_addresses;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if (in6_dev->cnf.optimistic_dad &&
+ if ((net->ipv6.devconf_all->optimistic_dad ||
+ in6_dev->cnf.optimistic_dad) &&
!net->ipv6.devconf_all->forwarding && sllao)
addr_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -3051,7 +3060,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
u32 addr_flags = flags | IFA_F_PERMANENT;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if (idev->cnf.optimistic_dad &&
+ if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
+ idev->cnf.optimistic_dad) &&
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
addr_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -3810,6 +3820,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
goto out;
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
+ dev_net(dev)->ipv6.devconf_all->accept_dad < 1 ||
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
@@ -3841,7 +3852,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
*/
if (ifp->flags & IFA_F_OPTIMISTIC) {
ip6_ins_rt(ifp->rt);
- if (ipv6_use_optimistic_addr(idev)) {
+ if (ipv6_use_optimistic_addr(dev_net(dev), idev)) {
/* Because optimistic nodes can use this address,
* notify listeners. If DAD fails, RTM_DELADDR is sent.
*/
@@ -3897,7 +3908,9 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
- if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
+ if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
+ idev->cnf.accept_dad > 1) &&
+ !idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;
@@ -4940,9 +4953,10 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
/* Don't send DELADDR notification for TENTATIVE address,
* since NEWADDR notification is sent only after removing
- * TENTATIVE flag.
+ * TENTATIVE flag, if DAD has not failed.
*/
- if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
+ if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
+ event == RTM_DELADDR)
return;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
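
All of the addrconf hunks apply one pattern: a knob counts as enabled if either net.ipv6.conf.all or the per-device conf enables it, where previously only the per-device value was consulted. A reduced sketch of the rewritten predicate, trimmed to the two knobs the helper reads:

#include <stdbool.h>

struct devconf { int optimistic_dad; int use_optimistic; };

/* mirrors the rewritten ipv6_use_optimistic_addr(): a knob is on when
 * either the "all" conf or the per-device conf enables it */
static bool use_optimistic_addr(const struct devconf *all,
				const struct devconf *dev)
{
	if (!dev)
		return false;
	if (!all->optimistic_dad && !dev->optimistic_dad)
		return false;
	if (!all->use_optimistic && !dev->use_optimistic)
		return false;
	return true;
}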
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c82d41ef25e2..4b58f964da20 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -940,24 +940,25 @@ done:
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned int len)
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = skb_push(skb, t->hlen);
- __be16 *p = (__be16 *)(ipv6h+1);
+ struct ipv6hdr *ipv6h;
+ __be16 *p;
- ip6_flow_hdr(ipv6h, 0,
- ip6_make_flowlabel(dev_net(dev), skb,
- t->fl.u.ip6.flowlabel, true,
- &t->fl.u.ip6));
+ ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
+ ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
+ t->fl.u.ip6.flowlabel,
+ true, &t->fl.u.ip6));
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
ipv6h->daddr = t->parms.raddr;
- p[0] = t->parms.o_flags;
- p[1] = htons(type);
+ p = (__be16 *)(ipv6h + 1);
+ p[0] = t->parms.o_flags;
+ p[1] = htons(type);
/*
* Set the source hardware address.
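
The bug the hunk suggests: the old code reserved only t->hlen bytes of headroom, then wrote a full IPv6 header plus the leading GRE words through that pointer, trampling bytes beyond the reservation. A toy sketch of the push arithmetic, assuming a flat buffer models skb headroom (sizes are illustrative, not the real tunnel hlen accounting):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct hdr_v6 { uint8_t bytes[40]; };		/* sizeof(struct ipv6hdr) */
struct toy_skb { uint8_t buf[128]; uint8_t *data; };

/* like skb_push(): move data down, asserting headroom is available */
static void *push(struct toy_skb *skb, size_t len)
{
	assert((size_t)(skb->data - skb->buf) >= len);
	skb->data -= len;
	return skb->data;
}

int main(void)
{
	struct toy_skb skb;
	size_t gre_hlen = 4;			/* GRE flags + protocol words */
	struct hdr_v6 *ip6;
	uint16_t *p;

	skb.data = skb.buf + sizeof(skb.buf);	/* all headroom, no payload */

	/* the fix: reserve room for the GRE words *and* the IPv6 header */
	ip6 = push(&skb, gre_hlen + sizeof(*ip6));
	memset(ip6, 0, sizeof(*ip6));
	p = (uint16_t *)(ip6 + 1);		/* GRE words follow the header */
	p[0] = 0;				/* o_flags */
	p[1] = 0x86dd;				/* protocol type (example) */
	return 0;
}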
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3d6df489b39f..e0f9bc0421e0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2261,6 +2261,9 @@ static int __init ip6_tunnel_init(void)
{
int err;
+ if (!ipv6_mod_enabled())
+ return -EOPNOTSUPP;
+
err = register_pernet_device(&ip6_tnl_net_ops);
if (err < 0)
goto out_pernet;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e2ecfb137297..40d7234c27b9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1015,6 +1015,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ csum = skb->csum;
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index f236c0bc7b3f..51063d9ed0f7 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -1041,12 +1041,24 @@ out:
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
- const struct htype *h = set->data;
+ struct htype *h = set->data;
const struct htable *t;
struct nlattr *nested;
size_t memsize;
u8 htable_bits;
+ /* If any members have expired, set->elements will be wrong;
+ * the mtype_expire function will update it with the right count.
+ * We do not hold set->lock here, so grab it first.
+ * set->elements can still be incorrect in the case of a huge set,
+ * because elements might time out during the listing.
+ */
+ if (SET_WITH_TIMEOUT(set)) {
+ spin_lock_bh(&set->lock);
+ mtype_expire(set, h);
+ spin_unlock_bh(&set->lock);
+ }
+
rcu_read_lock_bh();
t = rcu_dereference_bh_nfnl(h->table);
memsize = mtype_ahash_memsize(h, t) + set->ext_size;
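
A hedged sketch of the pattern the comment describes: take the set lock just long enough to run the expiry pass, so the cached element count is refreshed before being reported. A tiny fixed-slot set with timestamps stands in for the hash type:

#include <pthread.h>
#include <time.h>

#define NSLOTS 8

struct tset {
	pthread_mutex_t lock;
	time_t expiry[NSLOTS];	/* 0 = empty slot */
	int elements;		/* cached count, maintained on add/expire */
};

static struct tset example = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* drop timed-out members and fix up the cached count */
static void expire(struct tset *s, time_t now)
{
	for (int i = 0; i < NSLOTS; i++) {
		if (s->expiry[i] && s->expiry[i] <= now) {
			s->expiry[i] = 0;
			s->elements--;
		}
	}
}

/* mirror of mtype_head(): take the lock just to run the expiry pass,
 * so the count reported to the reader is (mostly) accurate */
static int set_size(struct tset *s)
{
	int n;

	pthread_mutex_lock(&s->lock);
	expire(s, time(NULL));
	n = s->elements;
	pthread_mutex_unlock(&s->lock);
	return n;	/* can still drift while a huge listing proceeds */
}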
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index f393a7086025..af8345fc4fbd 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct,
srchash = hash_by_src(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
+ lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
spin_lock_bh(lock);
hlist_add_head_rcu(&ct->nat_bysource,
&nf_nat_bysource[srchash]);
@@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
unsigned int h;
h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+ spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
hlist_del_rcu(&ct->nat_bysource);
- spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+ spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
@@ -807,8 +807,8 @@ static int __init nf_nat_init(void)
/* Leave them the same for the moment. */
nf_nat_htable_size = nf_conntrack_htable_size;
- if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
- nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
+ if (nf_nat_htable_size < CONNTRACK_LOCKS)
+ nf_nat_htable_size = CONNTRACK_LOCKS;
nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
if (!nf_nat_bysource)
@@ -821,7 +821,7 @@ static int __init nf_nat_init(void)
return ret;
}
- for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
+ for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_nat_locks[i]);
nf_ct_helper_expectfn_register(&follow_master_nat);
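
These hunks only swap ARRAY_SIZE(nf_nat_locks) for CONNTRACK_LOCKS, the constant the lock array is presumably sized by, but the structure is worth spelling out: lock striping, where a hash table much larger than the lock array shares locks by hash % nlocks. A userspace sketch under that assumption, with pthread spinlocks standing in for spinlock_t:

#include <pthread.h>

#define NLOCKS 1024		/* plays the role of CONNTRACK_LOCKS */

static pthread_spinlock_t locks[NLOCKS];

/* every bucket maps onto one of NLOCKS stripes */
static pthread_spinlock_t *stripe(unsigned int hash)
{
	return &locks[hash % NLOCKS];
}

static void stripes_init(void)
{
	for (int i = 0; i < NLOCKS; i++)
		pthread_spin_init(&locks[i], PTHREAD_PROCESS_PRIVATE);
}

static void bucket_insert(unsigned int hash /* , node, table, ... */)
{
	pthread_spinlock_t *l = stripe(hash);

	pthread_spin_lock(l);
	/* link the entry into table[hash % table_size] here */
	pthread_spin_unlock(l);
}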
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c26172995511..d288f52c53f7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1684,10 +1684,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
mutex_lock(&fanout_mutex);
- err = -EINVAL;
- if (!po->running)
- goto out;
-
err = -EALREADY;
if (po->fanout)
goto out;
@@ -1749,7 +1745,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
- if (match->type == type &&
+
+ spin_lock(&po->bind_lock);
+ if (po->running &&
+ match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
@@ -1761,6 +1760,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
err = 0;
}
}
+ spin_unlock(&po->bind_lock);
+
+ if (err && !refcount_read(&match->sk_ref)) {
+ list_del(&match->list);
+ kfree(match);
+ }
+
out:
if (err && rollover) {
kfree(rollover);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 1a267e77c6de..d230cb4c8094 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -922,28 +922,28 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_flags_valid(fnew->flags)) {
err = -EINVAL;
- goto errout;
+ goto errout_idr;
}
}
err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
if (err)
- goto errout;
+ goto errout_idr;
err = fl_check_assign_mask(head, &mask);
if (err)
- goto errout;
+ goto errout_idr;
if (!tc_skip_sw(fnew->flags)) {
if (!fold && fl_lookup(head, &fnew->mkey)) {
err = -EEXIST;
- goto errout;
+ goto errout_idr;
}
err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
head->ht_params);
if (err)
- goto errout;
+ goto errout_idr;
}
if (!tc_skip_hw(fnew->flags)) {
@@ -952,7 +952,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
&mask.key,
fnew);
if (err)
- goto errout;
+ goto errout_idr;
}
if (!tc_in_hw(fnew->flags))
@@ -981,6 +981,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
kfree(tb);
return 0;
+errout_idr:
+ if (fnew->handle)
+ idr_remove_ext(&head->handle_idr, fnew->handle);
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 21cc45caf842..eeac606c95ab 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -32,6 +32,7 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (tc_skip_sw(head->flags))
return -1;
+ *res = head->res;
return tcf_exts_exec(skb, &head->exts, res);
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1fb0c754b7fd..a0a198768aad 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -685,6 +685,7 @@ void qdisc_reset(struct Qdisc *qdisc)
qdisc->gso_skb = NULL;
}
qdisc->q.qlen = 0;
+ qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index daaf214e5201..3f88b75488b0 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -958,6 +958,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
}
if (cl != NULL) {
+ int old_flags;
+
if (parentid) {
if (cl->cl_parent &&
cl->cl_parent->cl_common.classid != parentid)
@@ -978,6 +980,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
}
sch_tree_lock(sch);
+ old_flags = cl->cl_flags;
+
if (rsc != NULL)
hfsc_change_rsc(cl, rsc, cur_time);
if (fsc != NULL)
@@ -986,10 +990,21 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
hfsc_change_usc(cl, usc, cur_time);
if (cl->qdisc->q.qlen != 0) {
- if (cl->cl_flags & HFSC_RSC)
- update_ed(cl, qdisc_peek_len(cl->qdisc));
- if (cl->cl_flags & HFSC_FSC)
- update_vf(cl, 0, cur_time);
+ int len = qdisc_peek_len(cl->qdisc);
+
+ if (cl->cl_flags & HFSC_RSC) {
+ if (old_flags & HFSC_RSC)
+ update_ed(cl, len);
+ else
+ init_ed(cl, len);
+ }
+
+ if (cl->cl_flags & HFSC_FSC) {
+ if (old_flags & HFSC_FSC)
+ update_vf(cl, 0, cur_time);
+ else
+ init_vf(cl, len);
+ }
}
sch_tree_unlock(sch);
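
The fix distinguishes curves that were already active (update_ed/update_vf) from curves newly enabled by this change (init_ed/init_vf), using the flags captured before the curves were modified. A compact sketch of that dispatch on old vs. new flag bits; the init_/update_ calls are printed rather than performed:

#include <stdio.h>

#define RSC 0x1	/* real-time service curve */
#define FSC 0x2	/* fair service curve */

/* decide between (re)initialising and updating, per curve */
static void reapply(unsigned int old_flags, unsigned int new_flags)
{
	if (new_flags & RSC)
		puts(old_flags & RSC ? "update_ed" : "init_ed");
	if (new_flags & FSC)
		puts(old_flags & FSC ? "update_vf" : "init_vf");
}

int main(void)
{
	reapply(0, RSC | FSC);	/* curves just added: init both */
	reapply(RSC, RSC);	/* already active: update only */
	return 0;
}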
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 8c6d24b2995d..745f145d4c4d 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -282,6 +282,7 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
__be32 *subnet, u8 *prefix_len)
{
struct dst_entry *dst = sk_dst_get(clcsock->sk);
+ struct in_device *in_dev;
struct sockaddr_in addr;
int rc = -ENOENT;
int len;
@@ -298,14 +299,17 @@ int smc_netinfo_by_tcpsk(struct socket *clcsock,
/* get address to which the internal TCP socket is bound */
kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
/* analyze IPv4 specific data of net_device belonging to TCP socket */
- for_ifa(dst->dev->ip_ptr) {
- if (ifa->ifa_address != addr.sin_addr.s_addr)
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dst->dev);
+ for_ifa(in_dev) {
+ if (!inet_ifa_match(addr.sin_addr.s_addr, ifa))
continue;
*prefix_len = inet_mask_len(ifa->ifa_mask);
*subnet = ifa->ifa_address & ifa->ifa_mask;
rc = 0;
break;
- } endfor_ifa(dst->dev->ip_ptr);
+ } endfor_ifa(in_dev);
+ rcu_read_unlock();
out_rel:
dst_release(dst);
@@ -509,7 +513,7 @@ decline_rdma:
/* RDMA setup failed, switch back to TCP */
smc->use_fallback = true;
if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
- rc = smc_clc_send_decline(smc, reason_code, 0);
+ rc = smc_clc_send_decline(smc, reason_code);
if (rc < sizeof(struct smc_clc_msg_decline))
goto out_err;
}
@@ -804,8 +808,6 @@ static void smc_listen_work(struct work_struct *work)
rc = local_contact;
if (rc == -ENOMEM)
reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
- else if (rc == -ENOLINK)
- reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
goto decline_rdma;
}
link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
@@ -899,7 +901,7 @@ decline_rdma:
smc_conn_free(&new_smc->conn);
new_smc->use_fallback = true;
if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
- rc = smc_clc_send_decline(new_smc, reason_code, 0);
+ rc = smc_clc_send_decline(new_smc, reason_code);
if (rc < sizeof(struct smc_clc_msg_decline))
goto out_err;
}
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 6e44313e4467..0ccd6fa387ad 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -149,7 +149,7 @@ struct smc_connection {
atomic_t sndbuf_space; /* remaining space in sndbuf */
u16 tx_cdc_seq; /* sequence # for CDC send */
spinlock_t send_lock; /* protect wr_sends */
- struct work_struct tx_work; /* retry of smc_cdc_msg_send */
+ struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl.
* .prod cf. TCP rcv_nxt
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 3934913ab835..b7dd2743fb5c 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -95,9 +95,10 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
if (clcm->type == SMC_CLC_DECLINE) {
reason_code = SMC_CLC_DECL_REPLY;
- if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis)
- == SMC_CLC_DECL_SYNCERR)
+ if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
smc->conn.lgr->sync_err = true;
+ smc_lgr_terminate(smc->conn.lgr);
+ }
}
out:
@@ -105,8 +106,7 @@ out:
}
/* send CLC DECLINE message across internal TCP socket */
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
- u8 out_of_sync)
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
{
struct smc_clc_msg_decline dclc;
struct msghdr msg;
@@ -118,7 +118,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
dclc.hdr.type = SMC_CLC_DECLINE;
dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
dclc.hdr.version = SMC_CLC_V1;
- dclc.hdr.flag = out_of_sync ? 1 : 0;
+ dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
dclc.peer_diagnosis = htonl(peer_diag_info);
memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 13db8ce177c9..1c55414041d4 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -106,8 +106,7 @@ struct smc_ib_device;
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type);
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
- u8 out_of_sync);
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
u8 ibport);
int smc_clc_send_confirm(struct smc_sock *smc);
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 3c2e166b5d22..f0d16fb825f7 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -174,15 +174,15 @@ int smc_close_active(struct smc_sock *smc)
{
struct smc_cdc_conn_state_flags *txflags =
&smc->conn.local_tx_ctrl.conn_state_flags;
- long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
struct smc_connection *conn = &smc->conn;
struct sock *sk = &smc->sk;
int old_state;
+ long timeout;
int rc = 0;
- if (sock_flag(sk, SOCK_LINGER) &&
- !(current->flags & PF_EXITING))
- timeout = sk->sk_lingertime;
+ timeout = current->flags & PF_EXITING ?
+ 0 : sock_flag(sk, SOCK_LINGER) ?
+ sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
again:
old_state = sk->sk_state;
@@ -208,7 +208,7 @@ again:
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
release_sock(sk);
- cancel_work_sync(&conn->tx_work);
+ cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
if (sk->sk_state == SMC_ACTIVE) {
/* send close request */
@@ -234,7 +234,7 @@ again:
if (!smc_cdc_rxed_any_close(conn))
smc_close_stream_wait(smc, timeout);
release_sock(sk);
- cancel_work_sync(&conn->tx_work);
+ cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
if (sk->sk_err != ECONNABORTED) {
/* confirm close from peer */
@@ -263,7 +263,9 @@ again:
/* peer sending PeerConnectionClosed will cause transition */
break;
case SMC_PROCESSABORT:
- cancel_work_sync(&conn->tx_work);
+ release_sock(sk);
+ cancel_delayed_work_sync(&conn->tx_work);
+ lock_sock(sk);
smc_close_abort(conn);
sk->sk_state = SMC_CLOSED;
smc_close_wait_tx_pends(smc);
@@ -411,13 +413,14 @@ void smc_close_sock_put_work(struct work_struct *work)
int smc_close_shutdown_write(struct smc_sock *smc)
{
struct smc_connection *conn = &smc->conn;
- long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
struct sock *sk = &smc->sk;
int old_state;
+ long timeout;
int rc = 0;
- if (sock_flag(sk, SOCK_LINGER))
- timeout = sk->sk_lingertime;
+ timeout = current->flags & PF_EXITING ?
+ 0 : sock_flag(sk, SOCK_LINGER) ?
+ sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
again:
old_state = sk->sk_state;
@@ -425,7 +428,7 @@ again:
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
release_sock(sk);
- cancel_work_sync(&conn->tx_work);
+ cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
/* send close wr request */
rc = smc_close_wr(conn);
@@ -439,7 +442,7 @@ again:
if (!smc_cdc_rxed_any_close(conn))
smc_close_stream_wait(smc, timeout);
release_sock(sk);
- cancel_work_sync(&conn->tx_work);
+ cancel_delayed_work_sync(&conn->tx_work);
lock_sock(sk);
/* confirm close from peer */
rc = smc_close_wr(conn);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 1a16d51e2330..20b66e79c5d6 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -25,8 +25,9 @@
#include "smc_cdc.h"
#include "smc_close.h"
-#define SMC_LGR_NUM_INCR 256
-#define SMC_LGR_FREE_DELAY (600 * HZ)
+#define SMC_LGR_NUM_INCR 256
+#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
+#define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10)
static u32 smc_lgr_num; /* unique link group number */
@@ -107,8 +108,15 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
__smc_lgr_unregister_conn(conn);
}
write_unlock_bh(&lgr->conns_lock);
- if (reduced && !lgr->conns_num)
- schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY);
+ if (!reduced || lgr->conns_num)
+ return;
+ /* client link group creation always follows the server link group
+ * creation. For the client, use a somewhat higher removal delay,
+ * otherwise there is a risk of out-of-sync link groups.
+ */
+ mod_delayed_work(system_wq, &lgr->free_work,
+ lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
+ SMC_LGR_FREE_DELAY_SERV);
}
static void smc_lgr_free_work(struct work_struct *work)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 547e0e113b17..0b5852299158 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -380,6 +380,7 @@ static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
if (ndev) {
memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN);
+ dev_put(ndev);
} else if (!rc) {
memcpy(&smcibdev->mac[ibport - 1][0],
&smcibdev->gid[ibport - 1].raw[8], 3);
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 78f7af28ae4f..31f8453c25c5 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -181,8 +181,10 @@ static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem)
sizeof(new_pnetelem->ndev->name)) ||
smc_pnet_same_ibname(pnetelem,
new_pnetelem->smcibdev->ibdev->name,
- new_pnetelem->ib_port))
+ new_pnetelem->ib_port)) {
+ dev_put(pnetelem->ndev);
goto found;
+ }
}
list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist);
rc = 0;
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index b17a333e9bb0..3e631ae4b6b6 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -148,6 +148,8 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
read_done = sock_intr_errno(timeo);
break;
}
+ if (!timeo)
+ return -EAGAIN;
}
if (!atomic_read(&conn->bytes_to_rcv)) {
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index e2228f6d1c25..ec49bc3c3283 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -24,6 +24,8 @@
#include "smc_cdc.h"
#include "smc_tx.h"
+#define SMC_TX_WORK_DELAY HZ
+
/***************************** sndbuf producer *******************************/
/* callback implementation for sk.sk_write_space()
@@ -405,7 +407,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
goto out_unlock;
}
rc = 0;
- schedule_work(&conn->tx_work);
+ schedule_delayed_work(&conn->tx_work,
+ SMC_TX_WORK_DELAY);
}
goto out_unlock;
}
@@ -429,7 +432,7 @@ out_unlock:
*/
static void smc_tx_work(struct work_struct *work)
{
- struct smc_connection *conn = container_of(work,
+ struct smc_connection *conn = container_of(to_delayed_work(work),
struct smc_connection,
tx_work);
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
@@ -466,7 +469,8 @@ void smc_tx_consumer_update(struct smc_connection *conn)
if (!rc)
rc = smc_cdc_msg_send(conn, wr_buf, pend);
if (rc < 0) {
- schedule_work(&conn->tx_work);
+ schedule_delayed_work(&conn->tx_work,
+ SMC_TX_WORK_DELAY);
return;
}
smc_curs_write(&conn->rx_curs_confirmed,
@@ -485,6 +489,6 @@ void smc_tx_consumer_update(struct smc_connection *conn)
void smc_tx_init(struct smc_sock *smc)
{
smc->sk.sk_write_space = smc_tx_write_space;
- INIT_WORK(&smc->conn.tx_work, smc_tx_work);
+ INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
spin_lock_init(&smc->conn.send_lock);
}
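
The smc.h, smc_close.c, and smc_tx.c hunks are one mechanical conversion from work_struct to delayed_work. A kernel-style sketch of the four call sites that must change together, on a hypothetical struct conn (not standalone userspace code):

/* kernel-style sketch: work_struct becomes delayed_work */
#include <linux/workqueue.h>

struct conn {
	struct delayed_work tx_work;		/* was: struct work_struct */
};

static void tx_work_fn(struct work_struct *work)
{
	/* container_of() now goes through to_delayed_work() */
	struct conn *c = container_of(to_delayed_work(work),
				      struct conn, tx_work);
	(void)c;	/* ... transmit ... */
}

static void conn_init(struct conn *c)
{
	INIT_DELAYED_WORK(&c->tx_work, tx_work_fn);	/* was INIT_WORK() */
}

static void conn_kick(struct conn *c)
{
	schedule_delayed_work(&c->tx_work, HZ);	/* was schedule_work() */
}

static void conn_teardown(struct conn *c)
{
	cancel_delayed_work_sync(&c->tx_work);	/* was cancel_work_sync() */
}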
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index ab56bda66783..525d91e0d57e 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -244,7 +244,7 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
int rc;
ib_req_notify_cq(link->smcibdev->roce_cq_send,
- IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS);
+ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
pend = container_of(priv, struct smc_wr_tx_pend, priv);
rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
&failed_wr);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 0df8023f480b..690874293cfc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9987,6 +9987,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ if (!setup.chandef.chan)
+ return -EINVAL;
+
err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
&setup.beacon_rate);
if (err)
@@ -10903,6 +10906,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
+ !tb[NL80211_REKEY_DATA_KCK])
+ return -EINVAL;
if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
return -ERANGE;
if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
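
Both nl80211 fixes have the same shape: verify an attribute or channel is present before dereferencing it, since nla_len() on a missing (NULL) attribute, or chan->band on a NULL chan, would oops on crafted input. A reduced sketch with stand-in attribute structs; the length constant mirrors NL80211_REPLAY_CTR_LEN, taken here as an assumption:

#include <errno.h>
#include <stddef.h>

struct nlattr { int len; };		/* stand-in for netlink attributes */

enum { REPLAY_CTR, KEK, KCK, NUM_ATTRS };

#define REPLAY_CTR_LEN 8		/* NL80211_REPLAY_CTR_LEN upstream */

/* presence first, then length: checking len on a missing (NULL)
 * attribute is exactly the NULL dereference the fix prevents */
static int validate_rekey(struct nlattr *tb[NUM_ATTRS])
{
	if (!tb[REPLAY_CTR] || !tb[KEK] || !tb[KCK])
		return -EINVAL;
	if (tb[REPLAY_CTR]->len != REPLAY_CTR_LEN)
		return -ERANGE;
	return 0;
}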