Diffstat (limited to 'net')
-rw-r--r--  net/ax25/ax25_route.c                   |  2
-rw-r--r--  net/can/af_can.c                        | 25
-rw-r--r--  net/core/bpf_sk_storage.c               |  3
-rw-r--r--  net/core/dev.c                          | 30
-rw-r--r--  net/core/ethtool.c                      |  5
-rw-r--r--  net/core/filter.c                       | 26
-rw-r--r--  net/core/neighbour.c                    |  7
-rw-r--r--  net/core/skbuff.c                       |  1
-rw-r--r--  net/core/sock.c                         |  7
-rw-r--r--  net/core/sysctl_net_core.c              |  7
-rw-r--r--  net/ipv4/fib_semantics.c                |  2
-rw-r--r--  net/ipv4/ip_output.c                    |  2
-rw-r--r--  net/ipv4/proc.c                         |  1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c              | 23
-rw-r--r--  net/ipv4/tcp.c                          |  6
-rw-r--r--  net/ipv4/tcp_input.c                    | 28
-rw-r--r--  net/ipv4/tcp_ipv4.c                     |  1
-rw-r--r--  net/ipv4/tcp_output.c                   | 10
-rw-r--r--  net/ipv4/tcp_timer.c                    |  1
-rw-r--r--  net/ipv4/udp.c                          | 10
-rw-r--r--  net/ipv6/icmp.c                         | 17
-rw-r--r--  net/ipv6/ip6_flowlabel.c                |  7
-rw-r--r--  net/ipv6/ip6_output.c                   |  2
-rw-r--r--  net/ipv6/reassembly.c                   |  4
-rw-r--r--  net/ipv6/udp.c                          |  8
-rw-r--r--  net/lapb/lapb_iface.c                   |  1
-rw-r--r--  net/mac80211/ieee80211_i.h              | 12
-rw-r--r--  net/mac80211/key.c                      |  2
-rw-r--r--  net/mac80211/mesh.c                     |  6
-rw-r--r--  net/mac80211/mlme.c                     | 12
-rw-r--r--  net/mac80211/rx.c                       |  2
-rw-r--r--  net/mac80211/tdls.c                     | 23
-rw-r--r--  net/mac80211/util.c                     |  8
-rw-r--r--  net/mac80211/wpa.c                      |  7
-rw-r--r--  net/mpls/Kconfig                        |  1
-rw-r--r--  net/mpls/mpls_iptunnel.c                |  2
-rw-r--r--  net/nfc/netlink.c                       |  3
-rw-r--r--  net/openvswitch/vport-internal_dev.c    | 18
-rw-r--r--  net/sched/cls_flower.c                  | 34
-rw-r--r--  net/sctp/sm_make_chunk.c                |  8
-rw-r--r--  net/tipc/group.c                        |  1
-rw-r--r--  net/tls/tls_sw.c                        |  1
-rw-r--r--  net/vmw_vsock/hyperv_transport.c        |  4
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c |  4
-rw-r--r--  net/wireless/Makefile                   |  1
-rw-r--r--  net/wireless/core.c                     |  8
-rw-r--r--  net/wireless/nl80211.c                  | 99
-rw-r--r--  net/wireless/pmsr.c                     |  4
-rw-r--r--  net/wireless/scan.c                     |  4
-rw-r--r--  net/wireless/util.c                     |  4
-rw-r--r--  net/xdp/xdp_umem.c                      | 11
51 files changed, 407 insertions, 108 deletions
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 09fdd0aac4b9..b40e0bce67ea 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -426,9 +426,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
if (ax25->sk != NULL) {
+ local_bh_disable();
bh_lock_sock(ax25->sk);
sock_reset_flag(ax25->sk, SOCK_ZAPPED);
bh_unlock_sock(ax25->sk);
+ local_bh_enable();
}
put:
diff --git a/net/can/af_can.c b/net/can/af_can.c
index e8fd5dc1780a..80281ef2ccbd 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -99,6 +99,7 @@ EXPORT_SYMBOL(can_ioctl);
static void can_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_error_queue);
}
static const struct can_proto *can_get_proto(int protocol)
@@ -952,6 +953,8 @@ static struct pernet_operations can_pernet_ops __read_mostly = {
static __init int can_init(void)
{
+ int err;
+
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
offsetof(struct canfd_frame, len) ||
@@ -965,15 +968,31 @@ static __init int can_init(void)
if (!rcv_cache)
return -ENOMEM;
- register_pernet_subsys(&can_pernet_ops);
+ err = register_pernet_subsys(&can_pernet_ops);
+ if (err)
+ goto out_pernet;
/* protocol register */
- sock_register(&can_family_ops);
- register_netdevice_notifier(&can_netdev_notifier);
+ err = sock_register(&can_family_ops);
+ if (err)
+ goto out_sock;
+ err = register_netdevice_notifier(&can_netdev_notifier);
+ if (err)
+ goto out_notifier;
+
dev_add_pack(&can_packet);
dev_add_pack(&canfd_packet);
return 0;
+
+out_notifier:
+ sock_unregister(PF_CAN);
+out_sock:
+ unregister_pernet_subsys(&can_pernet_ops);
+out_pernet:
+ kmem_cache_destroy(rcv_cache);
+
+ return err;
}
static __exit void can_exit(void)
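
The new error unwinding in can_init() follows the usual register-then-unwind shape: each label undoes only the steps that had already succeeded before the failure. A minimal user-space C sketch of that shape (the step_*/undo_* functions are illustrative stand-ins, not kernel APIs):

#include <stdio.h>

static int step_a(void) { return 0; }   /* e.g. register_pernet_subsys() */
static int step_b(void) { return 0; }   /* e.g. sock_register() */
static int step_c(void) { return -1; }  /* e.g. register_netdevice_notifier(), failing here */

static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init_all(void)
{
        int err;

        err = step_a();
        if (err)
                goto out_a;
        err = step_b();
        if (err)
                goto out_b;
        err = step_c();
        if (err)
                goto out_c;
        return 0;

out_c:
        undo_b();       /* fall through: undo everything done so far */
out_b:
        undo_a();
out_a:
        return err;
}

int main(void)
{
        return init_all() ? 1 : 0;
}
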
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index cc9597a87770..d1c4e1f3be2c 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -633,7 +633,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&smap->map, attr);
- smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
+ /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
+ smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
nbuckets = 1U << smap->bucket_log;
smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
GFP_USER | __GFP_NOWARN);
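
On a machine with a single possible CPU, ilog2(roundup_pow_of_two(1)) is 0, so the old code ended up with a single bucket; the max_t() clamp guarantees at least two. A small stand-alone check of that arithmetic (the helpers below are user-space reimplementations for illustration only):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

static unsigned int ilog2(unsigned int n)
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned int cpus, old_log, bucket_log;

        for (cpus = 1; cpus <= 8; cpus++) {
                old_log = ilog2(roundup_pow_of_two(cpus));
                bucket_log = old_log > 1 ? old_log : 1;  /* max_t(u32, 1, ...) */
                printf("cpus=%u old buckets=%u new buckets=%u\n",
                       cpus, 1U << old_log, 1U << bucket_log);
        }
        return 0;
}
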
diff --git a/net/core/dev.c b/net/core/dev.c
index eb7fb6daa1ef..d6edd218babd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4923,8 +4923,36 @@ skip_classify:
}
if (unlikely(skb_vlan_tag_present(skb))) {
- if (skb_vlan_tag_get_id(skb))
+check_vlan_id:
+ if (skb_vlan_tag_get_id(skb)) {
+ /* Vlan id is non 0 and vlan_do_receive() above couldn't
+ * find vlan device.
+ */
skb->pkt_type = PACKET_OTHERHOST;
+ } else if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+ skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+ /* Outer header is 802.1P with vlan 0, inner header is
+ * 802.1Q or 802.1AD and vlan_do_receive() above could
+ * not find vlan dev for vlan id 0.
+ */
+ __vlan_hwaccel_clear_tag(skb);
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ goto out;
+ if (vlan_do_receive(&skb))
+ /* After stripping off 802.1P header with vlan 0
+ * vlan dev is found for inner header.
+ */
+ goto another_round;
+ else if (unlikely(!skb))
+ goto out;
+ else
+ /* We have stripped outer 802.1P vlan 0 header.
+ * But could not find vlan dev.
+ * check again for vlan id to set OTHERHOST.
+ */
+ goto check_vlan_id;
+ }
/* Note: we might in the future use prio bits
* and set skb->priority like in vlan_do_receive()
* For the time being, just ignore Priority Code Point
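
The added branch handles frames whose outer header is a priority tag (VID 0) wrapped around a real 802.1Q/802.1AD header: strip the zero tag, retry the VLAN device lookup, and only fall back to PACKET_OTHERHOST once a non-zero VID really has no device. A very simplified user-space model of that control flow, assuming a frame is just a list of VIDs (outermost first) and the configured VLANs are a plain array; this illustrates the decision logic only, not the kernel data path:

#include <stdbool.h>
#include <stdio.h>

enum verdict { DELIVER_VLAN, DELIVER_UNTAGGED, OTHERHOST };

static bool have_vlan_dev(const int *vids, int nvids, int vid)
{
        for (int i = 0; i < nvids; i++)
                if (vids[i] == vid)
                        return true;
        return false;
}

static enum verdict classify(const int *tags, int ntags,
                             const int *vids, int nvids)
{
        int i = 0;

        while (i < ntags) {
                if (have_vlan_dev(vids, nvids, tags[i]))
                        return DELIVER_VLAN;    /* vlan_do_receive() succeeded */
                if (tags[i] != 0)
                        return OTHERHOST;       /* unknown non-zero VID */
                i++;                            /* strip the VID-0 priority tag, look at inner header */
        }
        return DELIVER_UNTAGGED;                /* no (more) VLAN headers */
}

int main(void)
{
        int vids[] = { 100 };
        int doubly_tagged[] = { 0, 100 };       /* 802.1P outer, 802.1Q 100 inner */
        int unknown[] = { 0, 42 };

        printf("%d\n", classify(doubly_tagged, 2, vids, 1)); /* DELIVER_VLAN */
        printf("%d\n", classify(unknown, 2, vids, 1));       /* OTHERHOST */
        return 0;
}
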
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d08b1e19ce9c..4d1011b2e24f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -3020,6 +3020,11 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
match->mask.vlan.vlan_id =
ntohs(ext_m_spec->vlan_tci) & 0x0fff;
+ match->key.vlan.vlan_dei =
+ !!(ext_h_spec->vlan_tci & htons(0x1000));
+ match->mask.vlan.vlan_dei =
+ !!(ext_m_spec->vlan_tci & htons(0x1000));
+
match->key.vlan.vlan_priority =
(ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
match->mask.vlan.vlan_priority =
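
The ethtool conversion above now also carries the DEI bit when building a flow rule; the 16-bit VLAN TCI splits into PCP (top 3 bits, mask 0xe000), DEI (0x1000) and the 12-bit VID (0x0fff). A tiny host-order extraction using the same masks (byte-order handling from the original code omitted):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t tci = 0xb064;                   /* example: PCP 5, DEI 1, VID 100 */
        unsigned int vid = tci & 0x0fff;         /* 12-bit VLAN ID       */
        unsigned int dei = !!(tci & 0x1000);     /* drop eligible bit    */
        unsigned int pcp = (tci & 0xe000) >> 13; /* 3-bit priority (PCP) */

        printf("vid=%u dei=%u pcp=%u\n", vid, dei, pcp);
        return 0;
}
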
diff --git a/net/core/filter.c b/net/core/filter.c
index cd09bf5d21f4..f615e42cf4ef 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5300,7 +5300,13 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct net *net;
int sdif;
- family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
+ if (len == sizeof(tuple->ipv4))
+ family = AF_INET;
+ else if (len == sizeof(tuple->ipv6))
+ family = AF_INET6;
+ else
+ return NULL;
+
if (unlikely(family == AF_UNSPEC || flags ||
!((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
@@ -5333,8 +5339,14 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
ifindex, proto, netns_id, flags);
- if (sk)
+ if (sk) {
sk = sk_to_full_sk(sk);
+ if (!sk_fullsock(sk)) {
+ if (!sock_flag(sk, SOCK_RCU_FREE))
+ sock_gen_put(sk);
+ return NULL;
+ }
+ }
return sk;
}
@@ -5365,8 +5377,14 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
flags);
- if (sk)
+ if (sk) {
sk = sk_to_full_sk(sk);
+ if (!sk_fullsock(sk)) {
+ if (!sock_flag(sk, SOCK_RCU_FREE))
+ sock_gen_put(sk);
+ return NULL;
+ }
+ }
return sk;
}
@@ -6726,6 +6744,7 @@ static bool sock_addr_is_valid_access(int off, int size,
case BPF_CGROUP_INET4_BIND:
case BPF_CGROUP_INET4_CONNECT:
case BPF_CGROUP_UDP4_SENDMSG:
+ case BPF_CGROUP_UDP4_RECVMSG:
break;
default:
return false;
@@ -6736,6 +6755,7 @@ static bool sock_addr_is_valid_access(int off, int size,
case BPF_CGROUP_INET6_BIND:
case BPF_CGROUP_INET6_CONNECT:
case BPF_CGROUP_UDP6_SENDMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
break;
default:
return false;
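
__bpf_skc_lookup() now rejects tuple sizes that match neither the IPv4 nor the IPv6 variant instead of treating every non-IPv4 length as IPv6. A user-space sketch of that selection, with illustrative tuple layouts rather than the real struct bpf_sock_tuple:

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>         /* AF_INET, AF_INET6, AF_UNSPEC */

/* illustrative stand-ins for the two bpf_sock_tuple variants */
struct tuple_v4 { uint32_t saddr, daddr; uint16_t sport, dport; };
struct tuple_v6 { uint32_t saddr[4], daddr[4]; uint16_t sport, dport; };

static int family_from_len(size_t len)
{
        if (len == sizeof(struct tuple_v4))
                return AF_INET;
        if (len == sizeof(struct tuple_v6))
                return AF_INET6;
        return AF_UNSPEC;       /* reject anything else, as the fix does */
}

int main(void)
{
        printf("%d %d %d\n",
               family_from_len(sizeof(struct tuple_v4)),
               family_from_len(sizeof(struct tuple_v6)),
               family_from_len(7));
        return 0;
}
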
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0e2c07355463..9e7fc929bc50 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3203,6 +3203,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+ __acquires(tbl->lock)
__acquires(rcu_bh)
{
struct neigh_seq_state *state = seq->private;
@@ -3213,6 +3214,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
rcu_read_lock_bh();
state->nht = rcu_dereference_bh(tbl->nht);
+ read_lock(&tbl->lock);
return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
@@ -3246,8 +3248,13 @@ out:
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
+ __releases(tbl->lock)
__releases(rcu_bh)
{
+ struct neigh_seq_state *state = seq->private;
+ struct neigh_table *tbl = state->tbl;
+
+ read_unlock(&tbl->lock);
rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 47c1aa9ee045..c8cd99c3603f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2337,6 +2337,7 @@ do_frag_list:
kv.iov_base = skb->data + offset;
kv.iov_len = slen;
memset(&msg, 0, sizeof(msg));
+ msg.msg_flags = MSG_DONTWAIT;
ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
if (ret <= 0)
diff --git a/net/core/sock.c b/net/core/sock.c
index 2b3701958486..af09a23e4822 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1850,6 +1850,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
goto out;
}
RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
+#ifdef CONFIG_BPF_SYSCALL
+ RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
+#endif
newsk->sk_err = 0;
newsk->sk_err_soft = 0;
@@ -2320,6 +2323,7 @@ static void sk_leave_memory_pressure(struct sock *sk)
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER get_order(32768)
+DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
/**
* skb_page_frag_refill - check that a page_frag contains enough room
@@ -2344,7 +2348,8 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
}
pfrag->offset = 0;
- if (SKB_FRAG_PAGE_ORDER) {
+ if (SKB_FRAG_PAGE_ORDER &&
+ !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
/* Avoid direct reclaim but allow kswapd to wake */
pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
__GFP_COMP | __GFP_NOWARN |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 1a2685694abd..f9204719aeee 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -562,6 +562,13 @@ static struct ctl_table net_core_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
+ {
+ .procname = "high_order_alloc_disable",
+ .data = &net_high_order_alloc_disable_key.key,
+ .maxlen = sizeof(net_high_order_alloc_disable_key),
+ .mode = 0644,
+ .proc_handler = proc_do_static_key,
+ },
{ }
};
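
The sock.c and sysctl_net_core.c hunks above add a static key that lets skb_page_frag_refill() skip its high-order page allocations, plus a sysctl to flip it (given the net_core_table entry, the knob should appear as /proc/sys/net/core/high_order_alloc_disable). SKB_FRAG_PAGE_ORDER is get_order(32768); a small user-space version of that computation, assuming a 4 KiB page size (illustrative, not the kernel macro):

#include <stdio.h>

static unsigned int get_order(unsigned long size)
{
        unsigned long pages = (size + 4095) / 4096;
        unsigned int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        /* SKB_FRAG_PAGE_ORDER above is get_order(32768): order 3, i.e. 8 pages */
        printf("get_order(32768) = %u (%lu pages per frag)\n",
               get_order(32768), 1UL << get_order(32768));
        return 0;
}
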
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b80410673915..bfa49a88d03a 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -964,7 +964,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
{
struct net_device *dev;
struct fib_result res;
- int err;
+ int err = 0;
if (nh->fib_nh_flags & RTNH_F_ONLINK) {
unsigned int addr_type;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8c9189a41b13..16f9159234a2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -918,7 +918,7 @@ static int __ip_append_data(struct sock *sk,
uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
if (!uarg)
return -ENOBUFS;
- extra_uref = !skb; /* only extra ref if !MSG_MORE */
+ extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
if (rt->dst.dev->features & NETIF_F_SG &&
csummode == CHECKSUM_PARTIAL) {
paged = true;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4370f4246e86..073273b751f8 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -287,6 +287,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED),
SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP),
SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
+ SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 875867b64d6a..b6f14af926fa 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -39,6 +39,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
+static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
+static int tcp_min_snd_mss_max = 65535;
static int ip_privileged_port_min;
static int ip_privileged_port_max = 65535;
static int ip_ttl_min = 1;
@@ -559,6 +561,18 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &sysctl_fib_sync_mem_min,
.extra2 = &sysctl_fib_sync_mem_max,
},
+ {
+ .procname = "tcp_rx_skb_cache",
+ .data = &tcp_rx_skb_cache_key.key,
+ .mode = 0644,
+ .proc_handler = proc_do_static_key,
+ },
+ {
+ .procname = "tcp_tx_skb_cache",
+ .data = &tcp_tx_skb_cache_key.key,
+ .mode = 0644,
+ .proc_handler = proc_do_static_key,
+ },
{ }
};
@@ -758,6 +772,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "tcp_min_snd_mss",
+ .data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_min_snd_mss_min,
+ .extra2 = &tcp_min_snd_mss_max,
+ },
+ {
.procname = "tcp_probe_threshold",
.data = &init_net.ipv4.sysctl_tcp_probe_threshold,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f12d500ec85c..7dc9ab84bb69 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -317,6 +317,11 @@ struct tcp_splice_state {
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
+DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
+EXPORT_SYMBOL(tcp_rx_skb_cache_key);
+
+DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
+
void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
@@ -3868,6 +3873,7 @@ void __init tcp_init(void)
unsigned long limit;
unsigned int i;
+ BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
FIELD_SIZEOF(struct sk_buff, cb));
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 08a477e74cf3..d95ee40df6c2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1302,7 +1302,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
TCP_SKB_CB(skb)->seq += shifted;
tcp_skb_pcount_add(prev, pcount);
- BUG_ON(tcp_skb_pcount(skb) < pcount);
+ WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
tcp_skb_pcount_add(skb, -pcount);
/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1368,6 +1368,21 @@ static int skb_can_shift(const struct sk_buff *skb)
return !skb_headlen(skb) && skb_is_nonlinear(skb);
}
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+ int pcount, int shiftlen)
+{
+ /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+ * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+ * to make sure not storing more than 65535 * 8 bytes per skb,
+ * even if current MSS is bigger.
+ */
+ if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+ return 0;
+ if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+ return 0;
+ return skb_shift(to, from, shiftlen);
+}
+
/* Try collapsing SACK blocks spanning across multiple skbs to a single
* skb.
*/
@@ -1473,7 +1488,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
goto fallback;
- if (!skb_shift(prev, skb, len))
+ if (!tcp_skb_shift(prev, skb, pcount, len))
goto fallback;
if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
goto out;
@@ -1491,11 +1506,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
goto out;
len = skb->len;
- if (skb_shift(prev, skb, len)) {
- pcount += tcp_skb_pcount(skb);
- tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
+ pcount = tcp_skb_pcount(skb);
+ if (tcp_skb_shift(prev, skb, pcount, len))
+ tcp_shifted_skb(sk, prev, skb, state, pcount,
len, mss, 0);
- }
out:
return prev;
@@ -2648,7 +2662,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
struct tcp_sock *tp = tcp_sk(sk);
bool recovered = !before(tp->snd_una, tp->high_seq);
- if ((flag & FLAG_SND_UNA_ADVANCED) &&
+ if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
tcp_try_undo_loss(sk, false))
return;
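
tcp_skb_shift() caps aggregation because TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits and, per the comment above, the minimum GSO segment size is 8 bytes, so a single skb must stay below 65535 segments and 65535 * 8 = 524280 bytes. A stand-alone check of those two limits:

#include <stdbool.h>
#include <stdio.h>

#define TCP_MIN_GSO_SIZE 8      /* from the comment in tcp_skb_shift() above */

/* illustrative check mirroring the two limits in tcp_skb_shift() */
static bool shift_allowed(unsigned int to_len, unsigned int to_pcount,
                          unsigned int pcount, unsigned int shiftlen)
{
        if (to_len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE)
                return false;   /* would overflow 16-bit gso_segs at the minimum gso_size */
        if (to_pcount + pcount > 65535)
                return false;   /* tcp_gso_segs itself is only 16 bits */
        return true;
}

int main(void)
{
        printf("cap in bytes: %d\n", 65535 * TCP_MIN_GSO_SIZE);    /* 524280 */
        printf("%d %d\n",
               shift_allowed(500000, 1000, 10, 1000),   /* allowed */
               shift_allowed(524000, 1000, 10, 1000));  /* refused */
        return 0;
}
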
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bc86f9735f45..cfa81190a1b1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2628,6 +2628,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_ecn_fallback = 1;
net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+ net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f429e856e263..00c01a01b547 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1296,6 +1296,11 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
if (nsize < 0)
nsize = 0;
+ if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ return -ENOMEM;
+ }
+
if (skb_unclone(skb, gfp))
return -ENOMEM;
@@ -1454,8 +1459,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
mss_now -= icsk->icsk_ext_hdr_len;
/* Then reserve room for full set of TCP options and 8 bytes of data */
- if (mss_now < 48)
- mss_now = 48;
+ mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
return mss_now;
}
@@ -2747,7 +2751,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
if (next_skb_size <= skb_availroom(skb))
skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
next_skb_size);
- else if (!skb_shift(skb, next_skb, next_skb_size))
+ else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
return false;
}
tcp_highest_sack_replace(sk, next_skb, skb);
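
The new guard in tcp_fragment() refuses to split an skb once the write queue holds more than twice the send buffer ((sk_wmem_queued >> 1) > sk_sndbuf), bumping the TCPWqueueTooBig counter added in proc.c; the same file also replaces the hard-coded 48-byte MSS floor (maximum TCP option space plus 8 data bytes) with the new net.ipv4.tcp_min_snd_mss sysctl. A tiny illustration of the 2x-sndbuf condition (values are arbitrary examples):

#include <stdbool.h>
#include <stdio.h>

/* illustrative version of the new guard in tcp_fragment() */
static bool fragment_refused(unsigned int wmem_queued, unsigned int sndbuf)
{
        return (wmem_queued >> 1) > sndbuf;
}

int main(void)
{
        unsigned int sndbuf = 212992;   /* arbitrary example value */

        printf("%d\n", fragment_refused(2 * sndbuf, sndbuf));      /* 0: exactly 2x still allowed */
        printf("%d\n", fragment_refused(2 * sndbuf + 2, sndbuf));  /* 1: beyond 2x refused */
        return 0;
}
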
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5bad937ce779..c801cd37cc2a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -155,6 +155,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len);
+ mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
}
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7c6228fbf5dd..eed59c847722 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -498,7 +498,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ inet_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
@@ -1773,6 +1777,10 @@ try_again:
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
+
+ if (cgroup_bpf_enabled)
+ BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
+ (struct sockaddr *)sin);
}
if (udp_sk(sk)->gro_enabled)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index bafdd04a768d..375b4b4f9bf5 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -393,23 +393,28 @@ relookup_failed:
return ERR_PTR(err);
}
-static int icmp6_iif(const struct sk_buff *skb)
+static struct net_device *icmp6_dev(const struct sk_buff *skb)
{
- int iif = skb->dev->ifindex;
+ struct net_device *dev = skb->dev;
/* for local traffic to local address, skb dev is the loopback
* device. Check if there is a dst attached to the skb and if so
* get the real device index. Same is needed for replies to a link
* local address on a device enslaved to an L3 master device
*/
- if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
+ if (unlikely(dev->ifindex == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
const struct rt6_info *rt6 = skb_rt6_info(skb);
if (rt6)
- iif = rt6->rt6i_idev->dev->ifindex;
+ dev = rt6->rt6i_idev->dev;
}
- return iif;
+ return dev;
+}
+
+static int icmp6_iif(const struct sk_buff *skb)
+{
+ return icmp6_dev(skb)->ifindex;
}
/*
@@ -810,7 +815,7 @@ out:
static int icmpv6_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
- struct net_device *dev = skb->dev;
+ struct net_device *dev = icmp6_dev(skb);
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
struct icmp6hdr *hdr;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 2f3eb7dc45da..545e339b8c4f 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -250,9 +250,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
rcu_read_lock_bh();
for_each_sk_fl_rcu(np, sfl) {
struct ip6_flowlabel *fl = sfl->fl;
- if (fl->label == label) {
+
+ if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
fl->lastuse = jiffies;
- atomic_inc(&fl->users);
rcu_read_unlock_bh();
return fl;
}
@@ -618,7 +618,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
goto done;
}
fl1 = sfl->fl;
- atomic_inc(&fl1->users);
+ if (!atomic_inc_not_zero(&fl1->users))
+ fl1 = NULL;
break;
}
}
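
Both ip6_flowlabel.c hunks switch to atomic_inc_not_zero() so an RCU-side lookup cannot take a reference on a flow label whose refcount already hit zero and is about to be freed. A user-space C11 sketch of the inc-not-zero pattern (not the kernel atomic API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* take a reference only if the object is still alive (refcount != 0) */
static bool inc_not_zero(atomic_uint *refs)
{
        unsigned int old = atomic_load(refs);

        do {
                if (old == 0)
                        return false;   /* already on its way to being freed */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));

        return true;
}

int main(void)
{
        atomic_uint alive = 2, dying = 0;

        printf("%d %d\n", inc_not_zero(&alive), inc_not_zero(&dying)); /* 1 0 */
        return 0;
}
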
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 934c88f128ab..834475717110 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1340,7 +1340,7 @@ emsgsize:
uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
if (!uarg)
return -ENOBUFS;
- extra_uref = !skb; /* only extra ref if !MSG_MORE */
+ extra_uref = !skb_zcopy(skb); /* only ref on new uarg */
if (rt->dst.dev->features & NETIF_F_SG &&
csummode == CHECKSUM_PARTIAL) {
paged = true;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 22369694b2fb..b2b2c0c38b87 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -298,7 +298,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb_network_header_len(skb));
rcu_read_lock();
- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+ __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
@@ -312,7 +312,7 @@ out_oom:
net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
rcu_read_lock();
- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS);
rcu_read_unlock();
inet_frag_kill(&fq->q);
return -1;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b3418a7c5c74..70b01bd95022 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -239,7 +239,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
- inet6_sdif(skb), &udp_table, skb);
+ inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
@@ -365,6 +365,10 @@ try_again:
inet6_iif(skb));
}
*addr_len = sizeof(*sin6);
+
+ if (cgroup_bpf_enabled)
+ BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
+ (struct sockaddr *)sin6);
}
if (udp_sk(sk)->gro_enabled)
@@ -511,7 +515,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct net *net = dev_net(skb->dev);
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
- inet6_iif(skb), inet6_sdif(skb), udptable, skb);
+ inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
if (!sk) {
/* No socket for error: try tunnels before discarding */
sk = ERR_PTR(-ENOENT);
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 03f0cd872dce..5d2d1f746b91 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -177,6 +177,7 @@ int lapb_unregister(struct net_device *dev)
lapb = __lapb_devtostruct(dev);
if (!lapb)
goto out;
+ lapb_put(lapb);
lapb_stop_t1timer(lapb);
lapb_stop_t2timer(lapb);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 539d3516d768..6396d46a9a71 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1432,7 +1432,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
+ if (WARN_ON_ONCE(!chanctx_conf)) {
rcu_read_unlock();
return NULL;
}
@@ -2035,6 +2035,13 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
{
/*
+ * It's unsafe to try to do any work during reconfigure flow.
+ * When the flow ends the work will be requeued.
+ */
+ if (local->in_reconfig)
+ return false;
+
+ /*
* If quiescing is set, we are racing with __ieee80211_suspend.
* __ieee80211_suspend flushes the workers after setting quiescing,
* and we check quiescing / suspended before enqueing new workers.
@@ -2222,6 +2229,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
const u8 *addr);
void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
void ieee80211_tdls_chsw_work(struct work_struct *wk);
+void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
+ const u8 *peer, u16 reason);
+const char *ieee80211_get_reason_code_string(u16 reason_code);
extern const struct ethtool_ops ieee80211_ethtool_ops;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5f58895ee0d2..157ff5f890d2 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -265,11 +265,9 @@ int ieee80211_set_tx_key(struct ieee80211_key *key)
{
struct sta_info *sta = key->sta;
struct ieee80211_local *local = key->local;
- struct ieee80211_key *old;
assert_key_lock(local);
- old = key_mtx_dereference(local, sta->ptk[sta->ptk_idx]);
sta->ptk_idx = key->conf.keyidx;
ieee80211_check_fast_xmit(sta);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index a0cfe9debd65..2e7fa743c892 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -926,6 +926,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
/* flush STAs and mpaths on this iface */
sta_info_flush(sdata);
+ ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
/* stop the beacon */
@@ -1217,7 +1218,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
ifmsh->chsw_ttl = 0;
/* Remove the CSA and MCSP elements from the beacon */
- tmp_csa_settings = rcu_dereference(ifmsh->csa);
+ tmp_csa_settings = rcu_dereference_protected(ifmsh->csa,
+ lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(ifmsh->csa, NULL);
if (tmp_csa_settings)
kfree_rcu(tmp_csa_settings, rcu_head);
@@ -1239,6 +1241,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
+ lockdep_assert_held(&sdata->wdev.mtx);
+
tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
GFP_ATOMIC);
if (!tmp_csa_settings)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index faff4010871f..379d2ab6d327 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2960,7 +2960,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
#define case_WLAN(type) \
case WLAN_REASON_##type: return #type
-static const char *ieee80211_get_reason_code_string(u16 reason_code)
+const char *ieee80211_get_reason_code_string(u16 reason_code)
{
switch (reason_code) {
case_WLAN(UNSPECIFIED);
@@ -3025,6 +3025,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
if (len < 24 + 2)
return;
+ if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
+ ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
+ return;
+ }
+
if (ifmgd->associated &&
ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
const u8 *bssid = ifmgd->associated->bssid;
@@ -3074,6 +3079,11 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
+ if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
+ ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
+ return;
+ }
+
sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
mgmt->sa, reason_code,
ieee80211_get_reason_code_string(reason_code));
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 84a0c4d6962e..3c1ab870fefe 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3828,6 +3828,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
case NL80211_IFTYPE_STATION:
if (!bssid && !sdata->u.mgd.use_4addr)
return false;
+ if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
+ return false;
if (multicast)
return true;
return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ca97e1598c28..fca1f5477396 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1993,3 +1993,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk)
}
rtnl_unlock();
}
+
+void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
+ const u8 *peer, u16 reason)
+{
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(&sdata->vif, peer);
+ if (!sta || !sta->tdls) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n",
+ peer, reason,
+ ieee80211_get_reason_code_string(reason));
+
+ ieee80211_tdls_oper_request(&sdata->vif, peer,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
+ GFP_ATOMIC);
+}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c1bc7bf31844..1b224fa27367 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2477,6 +2477,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_lock(&local->mtx);
ieee80211_start_next_roc(local);
mutex_unlock(&local->mtx);
+
+ /* Requeue all works */
+ list_for_each_entry(sdata, &local->interfaces, list)
+ ieee80211_queue_work(&local->hw, &sdata->work);
}
ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
@@ -3792,7 +3796,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
}
/* Always allow software iftypes */
- if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
+ if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
+ (iftype == NL80211_IFTYPE_AP_VLAN &&
+ local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
if (radar_detect)
return -EINVAL;
return 0;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c59bb95a1975..a51c7909366e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1172,7 +1172,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
- u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
+ u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
@@ -1203,13 +1203,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
memcpy(nonce, hdr->addr2, ETH_ALEN);
memcpy(nonce + ETH_ALEN, ipn, 6);
+ mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
+ if (!mic)
+ return RX_DROP_UNUSABLE;
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
+ kfree(mic);
return RX_DROP_UNUSABLE;
}
+ kfree(mic);
}
memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
index d9391beea980..d1ad69b7942a 100644
--- a/net/mpls/Kconfig
+++ b/net/mpls/Kconfig
@@ -26,6 +26,7 @@ config NET_MPLS_GSO
config MPLS_ROUTING
tristate "MPLS: routing support"
depends on NET_IP_TUNNEL || NET_IP_TUNNEL=n
+ depends on PROC_SYSCTL
---help---
Add support for forwarding of mpls packets.
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 500596130760..d25e91d7bdc1 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -23,7 +23,7 @@
#include "internal.h"
static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
- [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 },
+ [MPLS_IPTUNNEL_DST] = { .len = sizeof(u32) },
[MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 },
};
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 1180b3e58a0a..ea64c90b14e8 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -911,7 +911,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb,
u32 device_idx, target_idx;
int rc;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_TARGET_INDEX])
return -EINVAL;
device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 758bb1da31df..d2437b5b2f6a 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -157,7 +157,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
{
struct vport *vport;
struct internal_dev *internal_dev;
+ struct net_device *dev;
int err;
+ bool free_vport = true;
vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
if (IS_ERR(vport)) {
@@ -165,8 +167,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
goto error;
}
- vport->dev = alloc_netdev(sizeof(struct internal_dev),
- parms->name, NET_NAME_USER, do_setup);
+ dev = alloc_netdev(sizeof(struct internal_dev),
+ parms->name, NET_NAME_USER, do_setup);
+ vport->dev = dev;
if (!vport->dev) {
err = -ENOMEM;
goto error_free_vport;
@@ -187,8 +190,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
rtnl_lock();
err = register_netdevice(vport->dev);
- if (err)
+ if (err) {
+ free_vport = false;
goto error_unlock;
+ }
dev_set_promiscuity(vport->dev, 1);
rtnl_unlock();
@@ -198,11 +203,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
error_unlock:
rtnl_unlock();
- free_percpu(vport->dev->tstats);
+ free_percpu(dev->tstats);
error_free_netdev:
- free_netdev(vport->dev);
+ free_netdev(dev);
error_free_vport:
- ovs_vport_free(vport);
+ if (free_vport)
+ ovs_vport_free(vport);
error:
return ERR_PTR(err);
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c388372df0e2..eedd5786c084 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -320,10 +320,13 @@ static int fl_init(struct tcf_proto *tp)
return rhashtable_init(&head->ht, &mask_ht_params);
}
-static void fl_mask_free(struct fl_flow_mask *mask)
+static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
- WARN_ON(!list_empty(&mask->filters));
- rhashtable_destroy(&mask->ht);
+ /* temporary masks don't have their filters list and ht initialized */
+ if (mask_init_done) {
+ WARN_ON(!list_empty(&mask->filters));
+ rhashtable_destroy(&mask->ht);
+ }
kfree(mask);
}
@@ -332,7 +335,15 @@ static void fl_mask_free_work(struct work_struct *work)
struct fl_flow_mask *mask = container_of(to_rcu_work(work),
struct fl_flow_mask, rwork);
- fl_mask_free(mask);
+ fl_mask_free(mask, true);
+}
+
+static void fl_uninit_mask_free_work(struct work_struct *work)
+{
+ struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+ struct fl_flow_mask, rwork);
+
+ fl_mask_free(mask, false);
}
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
@@ -1346,9 +1357,6 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
if (err)
goto errout_destroy;
- /* Wait until any potential concurrent users of mask are finished */
- synchronize_rcu();
-
spin_lock(&head->masks_lock);
list_add_tail_rcu(&newmask->list, &head->masks);
spin_unlock(&head->masks_lock);
@@ -1375,11 +1383,7 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
/* Insert mask as temporary node to prevent concurrent creation of mask
* with same key. Any concurrent lookups with same key will return
- * -EAGAIN because mask's refcnt is zero. It is safe to insert
- * stack-allocated 'mask' to masks hash table because we call
- * synchronize_rcu() before returning from this function (either in case
- * of error or after replacing it with heap-allocated mask in
- * fl_create_new_mask()).
+ * -EAGAIN because mask's refcnt is zero.
*/
fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
&mask->ht_node,
@@ -1414,8 +1418,6 @@ static int fl_check_assign_mask(struct cls_fl_head *head,
errout_cleanup:
rhashtable_remove_fast(&head->ht, &mask->ht_node,
mask_ht_params);
- /* Wait until any potential concurrent users of mask are finished */
- synchronize_rcu();
return ret;
}
@@ -1644,7 +1646,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
*arg = fnew;
kfree(tb);
- kfree(mask);
+ tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
return 0;
errout_ht:
@@ -1664,7 +1666,7 @@ errout:
errout_tb:
kfree(tb);
errout_mask_alloc:
- kfree(mask);
+ tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
if (fold)
__fl_put(fold);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f17908f5c4f3..9b0e5b0d701a 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2583,6 +2583,8 @@ do_addr_param:
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+ if (asoc->peer.cookie)
+ kfree(asoc->peer.cookie);
asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
retval = 0;
@@ -2647,6 +2649,8 @@ do_addr_param:
goto fall_through;
/* Save peer's random parameter */
+ if (asoc->peer.peer_random)
+ kfree(asoc->peer.peer_random);
asoc->peer.peer_random = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_random) {
@@ -2660,6 +2664,8 @@ do_addr_param:
goto fall_through;
/* Save peer's HMAC list */
+ if (asoc->peer.peer_hmacs)
+ kfree(asoc->peer.peer_hmacs);
asoc->peer.peer_hmacs = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_hmacs) {
@@ -2675,6 +2681,8 @@ do_addr_param:
if (!ep->auth_enable)
goto fall_through;
+ if (asoc->peer.peer_chunks)
+ kfree(asoc->peer.peer_chunks);
asoc->peer.peer_chunks = kmemdup(param.p,
ntohs(param.p->length), gfp);
if (!asoc->peer.peer_chunks)
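
Each sctp_process_param() hunk frees the previously stored copy before kmemdup()ing the new one, so a peer repeating the same parameter no longer leaks the earlier allocation. A minimal user-space sketch of that replace-without-leaking pattern, with malloc/free standing in for kmemdup/kfree:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* store a fresh copy of 'src', releasing whatever was stored before */
static int store_copy(char **slot, const char *src, size_t len)
{
        free(*slot);            /* may be NULL on the first call; free(NULL) is a no-op */
        *slot = malloc(len);
        if (!*slot)
                return -1;
        memcpy(*slot, src, len);
        return 0;
}

int main(void)
{
        char *cookie = NULL;

        store_copy(&cookie, "first", 6);
        store_copy(&cookie, "again", 6);        /* no leak of "first" */
        printf("%s\n", cookie);
        free(cookie);
        return 0;
}
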
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 992be6113676..5f98d38bcf08 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
+ __skb_queue_purge(&m->deferredq);
list_del(&m->list);
kfree(m);
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 960494f437ac..455a782c7658 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1143,7 +1143,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
full_record = false;
record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
- copied = 0;
copy = size;
if (copy >= record_room) {
copy = record_room;
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 0a6fe0649945..fb2df6e068fa 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -335,8 +335,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
struct sockaddr_vm addr;
struct sock *sk, *new = NULL;
- struct vsock_sock *vnew;
- struct hvsock *hvs, *hvs_new;
+ struct vsock_sock *vnew = NULL;
+ struct hvsock *hvs, *hvs_new = NULL;
int ret;
if_type = &chan->offermsg.offer.if_type;
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 5be105e98ed5..6f1a8aff65c5 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -870,8 +870,10 @@ virtio_transport_recv_connected(struct sock *sk,
if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
vsk->peer_shutdown |= SEND_SHUTDOWN;
if (vsk->peer_shutdown == SHUTDOWN_MASK &&
- vsock_stream_has_data(vsk) <= 0)
+ vsock_stream_has_data(vsk) <= 0) {
+ sock_set_flag(sk, SOCK_DONE);
sk->sk_state = TCP_CLOSING;
+ }
if (le32_to_cpu(pkt->hdr.flags))
sk->sk_state_change(sk);
break;
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 72a224ce8e0a..2eee93985ab0 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -39,6 +39,7 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
@(set -e; \
allf=""; \
for f in $^ ; do \
+ test -f $$f || continue;\
# similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
thisf=$$(od -An -v -tx1 < $$f | \
sed -e 's/ /\n/g' | \
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 037816163e70..53ad3dbb76fe 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -514,7 +514,7 @@ use_default_name:
&rdev->rfkill_ops, rdev);
if (!rdev->rfkill) {
- kfree(rdev);
+ wiphy_free(&rdev->wiphy);
return NULL;
}
@@ -1397,8 +1397,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
}
break;
case NETDEV_PRE_UP:
- if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
+ if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
+ !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
+ rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
+ wdev->use_4addr))
return notifier_from_errno(-EOPNOTSUPP);
+
if (rfkill_blocked(rdev->rfkill))
return notifier_from_errno(-ERFKILL);
break;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c391b560d986..520d437aa8d1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -304,8 +304,11 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
- [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
- [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
+ [NL80211_ATTR_MAC] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN },
+ [NL80211_ATTR_PREV_BSSID] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
@@ -356,7 +359,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
[NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
- [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN },
+ [NL80211_ATTR_HT_CAPABILITY] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = NL80211_HT_CAPABILITY_LEN
+ },
[NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 },
[NL80211_ATTR_IE] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
@@ -386,7 +392,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
- [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_PMKID] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = WLAN_PMKID_LEN
+ },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -448,7 +457,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
[NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
- [NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
+ [NL80211_ATTR_VHT_CAPABILITY] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = NL80211_VHT_CAPABILITY_LEN
+ },
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127),
[NL80211_ATTR_P2P_OPPPS] = NLA_POLICY_MAX(NLA_U8, 1),
@@ -484,7 +496,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
[NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
.len = IEEE80211_QOS_MAP_LEN_MAX },
- [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
+ [NL80211_ATTR_MAC_HINT] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
[NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
[NL80211_ATTR_SOCKET_OWNER] = { .type = NLA_FLAG },
@@ -495,7 +510,10 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
- [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
+ [NL80211_ATTR_MAC_MASK] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
[NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
@@ -507,15 +525,21 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MU_MIMO_GROUP_DATA] = {
.len = VHT_MUMIMO_GROUPS_DATA_LEN
},
- [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
+ [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1),
[NL80211_ATTR_BANDS] = { .type = NLA_U32 },
[NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
[NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
.len = FILS_MAX_KEK_LEN },
- [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
+ [NL80211_ATTR_FILS_NONCES] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = 2 * FILS_NONCE_LEN
+ },
[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
- [NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+ [NL80211_ATTR_BSSID] = { .type = NLA_EXACT_LEN_WARN, .len = ETH_ALEN },
[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
.len = sizeof(struct nl80211_bss_select_rssi_adjust)
@@ -528,7 +552,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
[NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
.len = FILS_ERP_MAX_RRK_LEN },
- [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+ [NL80211_ATTR_FILS_CACHE_ID] = { .type = NLA_EXACT_LEN_WARN, .len = 2 },
[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
[NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG },
[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
@@ -589,10 +613,13 @@ static const struct nla_policy
nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
[NL80211_WOWLAN_TCP_SRC_IPV4] = { .type = NLA_U32 },
[NL80211_WOWLAN_TCP_DST_IPV4] = { .type = NLA_U32 },
- [NL80211_WOWLAN_TCP_DST_MAC] = { .len = ETH_ALEN },
+ [NL80211_WOWLAN_TCP_DST_MAC] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
[NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
- [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .len = 1 },
+ [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
.len = sizeof(struct nl80211_wowlan_tcp_data_seq)
},
@@ -600,8 +627,8 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
.len = sizeof(struct nl80211_wowlan_tcp_data_token)
},
[NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
- [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .len = 1 },
- [NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
+ [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
+ [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 },
};
#endif /* CONFIG_PM */
@@ -619,9 +646,18 @@ nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
/* policy for GTK rekey offload attributes */
static const struct nla_policy
nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
- [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN },
- [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN },
- [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
+ [NL80211_REKEY_DATA_KEK] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = NL80211_KEK_LEN,
+ },
+ [NL80211_REKEY_DATA_KCK] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = NL80211_KCK_LEN,
+ },
+ [NL80211_REKEY_DATA_REPLAY_CTR] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = NL80211_REPLAY_CTR_LEN
+ },
};
static const struct nla_policy
@@ -635,7 +671,10 @@ static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
- [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = { .len = ETH_ALEN },
+ [NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] =
NLA_POLICY_NESTED(nl80211_match_band_rssi_policy),
@@ -667,7 +706,10 @@ nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
[NL80211_NAN_FUNC_SUBSCRIBE_ACTIVE] = { .type = NLA_FLAG },
[NL80211_NAN_FUNC_FOLLOW_UP_ID] = { .type = NLA_U8 },
[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] = { .type = NLA_U8 },
- [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = { .len = ETH_ALEN },
+ [NL80211_NAN_FUNC_FOLLOW_UP_DEST] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = ETH_ALEN
+ },
[NL80211_NAN_FUNC_CLOSE_RANGE] = { .type = NLA_FLAG },
[NL80211_NAN_FUNC_TTL] = { .type = NLA_U32 },
[NL80211_NAN_FUNC_SERVICE_INFO] = { .type = NLA_BINARY,
@@ -3420,8 +3462,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_IFTYPE])
type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
- if (!rdev->ops->add_virtual_intf ||
- !(rdev->wiphy.interface_modes & (1 << type)))
+ if (!rdev->ops->add_virtual_intf)
return -EOPNOTSUPP;
if ((type == NL80211_IFTYPE_P2P_DEVICE || type == NL80211_IFTYPE_NAN ||
@@ -3440,6 +3481,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (!(rdev->wiphy.interface_modes & (1 << type)) &&
+ !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
+ rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
+ return -EOPNOTSUPP;
+
err = nl80211_parse_mon_options(rdev, type, info, &params);
if (err < 0)
return err;
@@ -4057,7 +4103,10 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
.len = NL80211_MAX_SUPP_RATES },
[NL80211_TXRATE_HT] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_HT_RATES },
- [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+ [NL80211_TXRATE_VHT] = {
+ .type = NLA_EXACT_LEN_WARN,
+ .len = sizeof(struct nl80211_txrate_vht),
+ },
[NL80211_TXRATE_GI] = { .type = NLA_U8 },
};
@@ -4856,8 +4905,10 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
struct nlattr *sinfoattr, *bss_param;
hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
- if (!hdr)
+ if (!hdr) {
+ cfg80211_sinfo_release_content(sinfo);
return -1;
+ }
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index 1b190475359a..c09fbf09549d 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*/
#ifndef __PMSR_H
#define __PMSR_H
@@ -448,7 +448,7 @@ static int nl80211_pmsr_send_result(struct sk_buff *msg,
if (res->ap_tsf_valid &&
nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
- res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
+ res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
goto error;
if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index c04f5451f89b..aa571d727903 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1601,12 +1601,12 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
continue;
}
- if (seen_indices & BIT(mbssid_index_ie[2]))
+ if (seen_indices & BIT_ULL(mbssid_index_ie[2]))
/* We don't support legacy split of a profile */
net_dbg_ratelimited("Partial info for BSSID index %d\n",
mbssid_index_ie[2]);
- seen_indices |= BIT(mbssid_index_ie[2]);
+ seen_indices |= BIT_ULL(mbssid_index_ie[2]);
non_tx_data->bssid_index = mbssid_index_ie[2];
non_tx_data->max_bssid_indicator = elem->data[0];
diff --git a/net/wireless/util.c b/net/wireless/util.c
index cf63b635afc0..1c39d6a2e850 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1246,7 +1246,7 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
if (rate->he_dcm)
result /= 2;
- return result;
+ return result / 10000;
}
u32 cfg80211_calculate_bitrate(struct rate_info *rate)
@@ -1998,7 +1998,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
continue;
if (supp >= mcs_encoding) {
- max_vht_nss = i;
+ max_vht_nss = i + 1;
break;
}
}
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 2b18223e7eb8..9c6de4f114f8 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -143,6 +143,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
struct netdev_bpf bpf;
int err;
+ if (!umem->dev)
+ return;
+
if (umem->zc) {
bpf.command = XDP_SETUP_XSK_UMEM;
bpf.xsk.umem = NULL;
@@ -156,11 +159,9 @@ static void xdp_umem_clear_dev(struct xdp_umem *umem)
WARN(1, "failed to disable umem!\n");
}
- if (umem->dev) {
- rtnl_lock();
- xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
- rtnl_unlock();
- }
+ rtnl_lock();
+ xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
+ rtnl_unlock();
if (umem->zc) {
dev_put(umem->dev);