From 06928b3870d213cb42b00e2949419f2d44b9d45f Mon Sep 17 00:00:00 2001
From: Rabin Vincent
Date: Tue, 12 Jan 2016 20:17:08 +0100
Subject: net: bpf: reject invalid shifts

On ARM64, a BUG() is triggered in the eBPF JIT if a filter with a
constant shift that can't be encoded in the immediate field of the
UBFM/SBFM instructions is passed to the JIT. Since these shift
amounts, which are negative or >= regsize, are invalid, reject them in
the eBPF verifier and the classic BPF filter checker, for all
architectures.

Signed-off-by: Rabin Vincent
Acked-by: Alexei Starovoitov
Acked-by: Daniel Borkmann
Signed-off-by: David S. Miller
---
 net/core/filter.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'net')

diff --git a/net/core/filter.c b/net/core/filter.c
index 672eefbfbe99..37157c4c1a78 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -777,6 +777,11 @@ static int bpf_check_classic(const struct sock_filter *filter,
             if (ftest->k == 0)
                 return -EINVAL;
             break;
+        case BPF_ALU | BPF_LSH | BPF_K:
+        case BPF_ALU | BPF_RSH | BPF_K:
+            if (ftest->k >= 32)
+                return -EINVAL;
+            break;
         case BPF_LD | BPF_MEM:
         case BPF_LDX | BPF_MEM:
         case BPF_ST:
--
cgit

From ccdf6ce6a8dba374668ae9b4d763e19903611c38 Mon Sep 17 00:00:00 2001
From: Matti Vaittinen
Date: Mon, 11 Jan 2016 14:26:19 +0200
Subject: net: netlink: Fix multicast group storage allocation for families
 with more than one group

Multicast groups are stored in a global buffer. The check for the
required buffer size incorrectly compares the buffer size to the first
id for the family. This means that for families with more than one
mcast id one may allocate too small a buffer and end up writing the
rest of the groups to unallocated memory.

Fix the buffer size check to compare the allocated space to the last
mcast id for the family.

Tested on ARM using kernel 3.14

Signed-off-by: Matti Vaittinen
Signed-off-by: David S. Miller
---
 net/netlink/genetlink.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index bc0e504f33a6..a992083c0a64 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -185,7 +185,7 @@ static int genl_allocate_reserve_groups(int n_groups, int *first_id)
         }
     }
 
-    if (id >= mc_groups_longs * BITS_PER_LONG) {
+    if (id + n_groups >= mc_groups_longs * BITS_PER_LONG) {
         unsigned long new_longs = mc_groups_longs +
                                   BITS_TO_LONGS(n_groups);
         size_t nlen = new_longs * sizeof(unsigned long);
--
cgit
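The classic BPF check added in the first patch above can be exercised from
user space by attaching a filter with an out-of-range constant shift. A
minimal sketch (the filter contents are arbitrary, chosen only to hit the
new check; on kernels with the patch the setsockopt fails with EINVAL,
while older kernels silently accepted the program):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/filter.h>

int main(void)
{
    /* Classic BPF program with an invalid constant shift (k >= 32). */
    struct sock_filter insns[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),   /* A = skb->len */
        BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 32), /* A <<= 32: rejected */
        BPF_STMT(BPF_RET | BPF_K, 0xffff),
    };
    struct sock_fprog prog = {
        .len = sizeof(insns) / sizeof(insns[0]),
        .filter = insns,
    };
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return 1;
    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                   &prog, sizeof(prog)) < 0)
        perror("SO_ATTACH_FILTER"); /* expect EINVAL on patched kernels */
    return 0;
}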
From af63cf51b7f960aa73b32bac683cd4078f08fa0e Mon Sep 17 00:00:00 2001
From: Simon Wunderlich
Date: Mon, 30 Nov 2015 17:34:01 +0100
Subject: batman-adv: fix lockdep splat when doing mcast_free

While testing, we got something like this:

WARNING: CPU: 0 PID: 238 at net/batman-adv/multicast.c:142 batadv_mcast_mla_tt_retract+0x94/0x205 [batman_adv]()
[...]
Call Trace:
 [] dump_stack+0x4b/0x64
 [] warn_slowpath_common+0xbc/0x120
 [] ? batadv_mcast_mla_tt_retract+0x94/0x205 [batman_adv]
 [] warn_slowpath_null+0x15/0x20
 [] batadv_mcast_mla_tt_retract+0x94/0x205 [batman_adv]
 [] batadv_mcast_free+0x36/0x39 [batman_adv]
 [] batadv_mesh_free+0x7d/0x13f [batman_adv]
 [] batadv_softif_free+0x15/0x25 [batman_adv]
[...]

Signed-off-by: Simon Wunderlich
Signed-off-by: Marek Lindner
Signed-off-by: Antonio Quartulli
---
 net/batman-adv/multicast.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'net')

diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index eb76386f8d4b..75fa5013af72 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -802,7 +802,9 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
     batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 1);
     batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 1);
 
+    spin_lock_bh(&bat_priv->tt.commit_lock);
     batadv_mcast_mla_tt_retract(bat_priv, NULL);
+    spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
 /**
--
cgit

From bab7c6c3deac70966a3000402c0ea6d0c20edd15 Mon Sep 17 00:00:00 2001
From: Sven Eckelmann
Date: Tue, 5 Jan 2016 12:06:17 +0100
Subject: batman-adv: Fix list removal of batadv_hardif_neigh_node

The neigh_list with batadv_hardif_neigh_node objects is accessed with
only rcu_read_lock in batadv_hardif_neigh_get and
batadv_iv_neigh_print. Thus it is not allowed to kfree the object
before the rcu grace period ends (which may still protect contexts
accessing this object). Therefore the object first has to be removed
from the neigh_list, and then it has to wait, via either
synchronize_rcu or call_rcu, until the grace period ends before it can
be freed.

Fixes: cef63419f7db ("batman-adv: add list of unique single hop neighbors per hard-interface")
Signed-off-by: Sven Eckelmann
Signed-off-by: Marek Lindner
Signed-off-by: Antonio Quartulli
---
 net/batman-adv/originator.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

(limited to 'net')

diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 3c782a33bdac..ae6d18cafc5a 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -211,10 +211,6 @@ static void batadv_hardif_neigh_free_rcu(struct rcu_head *rcu)
     hardif_neigh = container_of(rcu, struct batadv_hardif_neigh_node, rcu);
 
-    spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-    hlist_del_init_rcu(&hardif_neigh->list);
-    spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-
     batadv_hardif_free_ref_now(hardif_neigh->if_incoming);
     kfree(hardif_neigh);
 }
@@ -227,8 +223,13 @@ static void batadv_hardif_neigh_free_rcu(struct rcu_head *rcu)
 static void
 batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh)
 {
-    if (atomic_dec_and_test(&hardif_neigh->refcount))
+    if (atomic_dec_and_test(&hardif_neigh->refcount)) {
+        spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+        hlist_del_init_rcu(&hardif_neigh->list);
+        spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+
         batadv_hardif_neigh_free_rcu(&hardif_neigh->rcu);
+    }
 }
 
 /**
@@ -238,8 +239,13 @@ batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh)
  */
 void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
 {
-    if (atomic_dec_and_test(&hardif_neigh->refcount))
+    if (atomic_dec_and_test(&hardif_neigh->refcount)) {
+        spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+        hlist_del_init_rcu(&hardif_neigh->list);
+        spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+
         call_rcu(&hardif_neigh->rcu, batadv_hardif_neigh_free_rcu);
+    }
 }
 
 /**
--
cgit
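The ordering this patch establishes (unlink under the writer-side lock
first, free only after the grace period) is the standard RCU deletion
pattern. A minimal kernel-style sketch of it, with simplified,
illustrative names rather than the actual batman-adv structures:

#include <linux/atomic.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct neigh_entry {
    struct hlist_node list;    /* linked on a per-interface list */
    atomic_t refcount;
    struct rcu_head rcu;
};

static DEFINE_SPINLOCK(neigh_list_lock);

static void neigh_free_rcu(struct rcu_head *rcu)
{
    /* Runs only after the grace period: no rcu_read_lock() section
     * can still hold a pointer to this object. */
    kfree(container_of(rcu, struct neigh_entry, rcu));
}

static void neigh_put(struct neigh_entry *n)
{
    if (atomic_dec_and_test(&n->refcount)) {
        /* 1. Unlink first, under the writer-side lock, so new
         * readers can no longer find the object ... */
        spin_lock_bh(&neigh_list_lock);
        hlist_del_init_rcu(&n->list);
        spin_unlock_bh(&neigh_list_lock);

        /* 2. ... then defer the kfree past the grace period. */
        call_rcu(&n->rcu, neigh_free_rcu);
    }
}

The buggy version did both steps inside the RCU callback, i.e. after the
grace period had already ended, so a concurrent reader could still reach
an object that was about to be freed.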
From b8e429a2feac623a34e21099a4a69de29b6d873e Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 13 Jan 2016 10:28:06 -0500
Subject: genetlink: Fix off-by-one in genl_allocate_reserve_groups()

The bug fix for adding n_groups to the computation forgot to adjust
">=" to ">" to keep the condition correct.

Signed-off-by: David S. Miller
---
 net/netlink/genetlink.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d3f6b063467b..f830326b3b1d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -185,7 +185,7 @@ static int genl_allocate_reserve_groups(int n_groups, int *first_id)
         }
     }
 
-    if (id + n_groups >= mc_groups_longs * BITS_PER_LONG) {
+    if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
         unsigned long new_longs = mc_groups_longs +
                                   BITS_TO_LONGS(n_groups);
         size_t nlen = new_longs * sizeof(unsigned long);
--
cgit
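Taken together, the two genetlink fixes above change what the bounds check
compares: the first checks against the end of the reserved block rather
than its first id, and this one tightens ">=" to ">" so an exactly-fitting
block no longer forces a needless reallocation. A standalone model of the
check with made-up numbers (hypothetical code, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG 64

/* Model: ids 0..capacity-1 are usable; a family reserves the block
 * id .. id + n_groups - 1. Return true if the bitmap must grow. */
static bool must_grow_old(int id, int n_groups, int longs)
{
    (void)n_groups;
    return id >= longs * BITS_PER_LONG;           /* pre-fix check */
}

static bool must_grow_fixed(int id, int n_groups, int longs)
{
    return id + n_groups > longs * BITS_PER_LONG; /* after both fixes */
}

int main(void)
{
    /* Block 62..64 overflows a 64-bit buffer, but the old check only
     * looked at the first id (62 < 64) and missed it. */
    printf("overflow case: old=%d fixed=%d\n",
           must_grow_old(62, 3, 1), must_grow_fixed(62, 3, 1));

    /* Block 61..63 fits exactly; with ">=" instead of ">" it would
     * still have forced a pointless reallocation. */
    printf("exact fit: fixed=%d\n", must_grow_fixed(61, 3, 1));
    return 0;
}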
From 9207f9d45b0ad071baa128e846d7e7ed85016df3 Mon Sep 17 00:00:00 2001
From: Konstantin Khlebnikov
Date: Fri, 8 Jan 2016 15:21:46 +0300
Subject: net: preserve IP control block during GSO segmentation

skb_gso_segment() uses the skb control block during segmentation. This
patch adds a 32-byte room for the previous control block, which will be
copied into all resulting segments.

This patch fixes a kernel crash during fragmenting of forwarded
packets. Fragmentation requires a valid IP CB in the skb for clearing
IP options. The patch also removes the custom save/restore in the OVS
code, which is now redundant.

Signed-off-by: Konstantin Khlebnikov
Link: http://lkml.kernel.org/r/CALYGNiP-0MZ-FExV2HutTvE9U-QQtkKSoE--KN=JQE5STYsjAA@mail.gmail.com
Signed-off-by: David S. Miller
---
 net/core/dev.c             | 5 +++++
 net/ipv4/ip_output.c       | 1 +
 net/openvswitch/datapath.c | 5 +----
 net/xfrm/xfrm_output.c     | 2 ++
 4 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'net')

diff --git a/net/core/dev.c b/net/core/dev.c
index 0ca95d5d7af0..cc9e3652cf93 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2695,6 +2695,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  *
  *    It may return NULL if the skb requires no segmentation.  This is
  *    only possible when GSO is used for verifying header integrity.
+ *
+ *    Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
  */
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                   netdev_features_t features, bool tx_path)
@@ -2709,6 +2711,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
             return ERR_PTR(err);
     }
 
+    BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
+                 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
+
     SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
     SKB_GSO_CB(skb)->encap_level = 0;
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 512a44778cf2..64878efa045c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -239,6 +239,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
      * from host network stack.
      */
     features = netif_skb_features(skb);
+    BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
     segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
     if (IS_ERR_OR_NULL(segs)) {
         kfree_skb(skb);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 91a8b004dc51..deadfdab1bc3 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -336,12 +336,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
     unsigned short gso_type = skb_shinfo(skb)->gso_type;
     struct sw_flow_key later_key;
     struct sk_buff *segs, *nskb;
-    struct ovs_skb_cb ovs_cb;
     int err;
 
-    ovs_cb = *OVS_CB(skb);
+    BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
     segs = __skb_gso_segment(skb, NETIF_F_SG, false);
-    *OVS_CB(skb) = ovs_cb;
     if (IS_ERR(segs))
         return PTR_ERR(segs);
     if (segs == NULL)
@@ -359,7 +357,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
     /* Queue all of the segments. */
     skb = segs;
     do {
-        *OVS_CB(skb) = ovs_cb;
         if (gso_type & SKB_GSO_UDP && skb != segs)
             key = &later_key;
 
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index cc3676eb6239..ff4a91fcab9f 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -167,6 +167,8 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
     struct sk_buff *segs;
 
+    BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
+    BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
     segs = skb_gso_segment(skb, 0);
     kfree_skb(skb);
     if (IS_ERR(segs))
--
cgit
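The patch works because the 48-byte skb->cb area is large enough to hold
both the protocol control block (the first 32 bytes, preserved across
segmentation) and the GSO control block behind it. A standalone model of
that layout invariant; the struct sizes here are illustrative stand-ins
for the real kernel structs:

#include <assert.h>
#include <string.h>

#define CB_SIZE 48          /* sizeof(skb->cb) in this kernel era */
#define SGO_CB_OFFSET 32    /* room preserved for the inner CB */

/* Illustrative stand-ins for the real structs. */
struct inet_cb { unsigned char opt[24]; int flags; }; /* <= 32 bytes */
struct gso_cb  { int mac_offset; int encap_level; };  /* <= 16 bytes */

/* The compile-time checks the patch adds as BUILD_BUG_ON()s: */
static_assert(sizeof(struct inet_cb) <= SGO_CB_OFFSET,
              "inner CB must fit below the GSO CB");
static_assert(SGO_CB_OFFSET + sizeof(struct gso_cb) <= CB_SIZE,
              "GSO CB must fit in the remaining cb space");

int main(void)
{
    unsigned char cb[CB_SIZE];

    /* Segmentation code may clobber only cb[SGO_CB_OFFSET..]; the
     * first SGO_CB_OFFSET bytes are copied into every segment. */
    memset(cb + SGO_CB_OFFSET, 0, CB_SIZE - SGO_CB_OFFSET);
    return 0;
}

(Note that SKB_SGO_CB_OFFSET, with "SGO", is the macro name this commit
actually introduced, so the diff's spelling is kept as-is.)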
From 65a5124a71e85c35fa8d047a471950325855dccf Mon Sep 17 00:00:00 2001
From: Xin Long
Date: Thu, 14 Jan 2016 13:49:34 +0800
Subject: sctp: support lookup with ep+paddr in transport rhashtable

Now, when we sendmsg, we translate the ep to a laddr by selecting the
first element of the list, and then do a lookup for a transport. But
sctp_hash_cmp() will compare it against the asoc addr_list, which may
be a subset of the ep addr_list, meaning that this chosen laddr may not
be there, and thus making it impossible to find the transport.

So we fix it by using ep + paddr to look up transports in the
hashtable. In sctp_hash_cmp(), if .ep is set, we check whether this
ep == asoc->ep; otherwise we do the laddr check.

Fixes: d6c0256a60e6 ("sctp: add the rhashtable apis for sctp global transport hashtable")
Signed-off-by: Xin Long
Acked-by: Marcelo Ricardo Leitner
Reported-by: Vlad Yasevich
Signed-off-by: David S. Miller
---
 net/sctp/input.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)

(limited to 'net')

diff --git a/net/sctp/input.c b/net/sctp/input.c
index d9a6e66c5c8a..b9a536b52da2 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -784,6 +784,7 @@ hit:
 
 /* rhashtable for transport */
 struct sctp_hash_cmp_arg {
+    const struct sctp_endpoint  *ep;
     const union sctp_addr       *laddr;
     const union sctp_addr       *paddr;
     const struct net            *net;
@@ -797,15 +798,20 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
     struct sctp_association *asoc = t->asoc;
     const struct net *net = x->net;
 
-    if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
-        return 1;
     if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
         return 1;
     if (!net_eq(sock_net(asoc->base.sk), net))
         return 1;
-    if (!sctp_bind_addr_match(&asoc->base.bind_addr,
-                              x->laddr, sctp_sk(asoc->base.sk)))
-        return 1;
+    if (x->ep) {
+        if (x->ep != asoc->ep)
+            return 1;
+    } else {
+        if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
+            return 1;
+        if (!sctp_bind_addr_match(&asoc->base.bind_addr,
+                                  x->laddr, sctp_sk(asoc->base.sk)))
+            return 1;
+    }
 
     return 0;
 }
@@ -832,9 +838,11 @@ static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed)
     const struct sctp_hash_cmp_arg *x = data;
     const union sctp_addr *paddr = x->paddr;
     const struct net *net = x->net;
-    u16 lport = x->laddr->v4.sin_port;
+    u16 lport;
     u32 addr;
 
+    lport = x->ep ? htons(x->ep->base.bind_addr.port) :
+                    x->laddr->v4.sin_port;
     if (paddr->sa.sa_family == AF_INET6)
         addr = jhash(&paddr->v6.sin6_addr, 16, seed);
     else
@@ -864,12 +872,9 @@ void sctp_transport_hashtable_destroy(void)
 
 void sctp_hash_transport(struct sctp_transport *t)
 {
-    struct sctp_sockaddr_entry *addr;
     struct sctp_hash_cmp_arg arg;
 
-    addr = list_entry(t->asoc->base.bind_addr.address_list.next,
-                      struct sctp_sockaddr_entry, list);
-    arg.laddr = &addr->a;
+    arg.ep = t->asoc->ep;
     arg.paddr = &t->ipaddr;
     arg.net = sock_net(t->asoc->base.sk);
 
@@ -891,6 +896,7 @@ struct sctp_transport *sctp_addrs_lookup_transport(
                 const union sctp_addr *paddr)
 {
     struct sctp_hash_cmp_arg arg = {
+        .ep    = NULL,
         .laddr = laddr,
         .paddr = paddr,
         .net   = net,
@@ -904,13 +910,15 @@ struct sctp_transport *sctp_epaddr_lookup_transport(
                 const struct sctp_endpoint *ep,
                 const union sctp_addr *paddr)
 {
-    struct sctp_sockaddr_entry *addr;
     struct net *net = sock_net(ep->base.sk);
+    struct sctp_hash_cmp_arg arg = {
+        .ep    = ep,
+        .paddr = paddr,
+        .net   = net,
+    };
 
-    addr = list_entry(ep->base.bind_addr.address_list.next,
-                      struct sctp_sockaddr_entry, list);
-
-    return sctp_addrs_lookup_transport(net, &addr->a, paddr);
+    return rhashtable_lookup_fast(&sctp_transport_hashtable, &arg,
+                                  sctp_hash_params);
 }
 
 /* Look up an association. */
--
cgit
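The core of the fix is a lookup key that carries either an endpoint
pointer or a local address, with the compare function branching on which
one is set. A simplified userspace model of that dispatch; the types and
fields here are invented for illustration, not the SCTP structures:

#include <stdio.h>

struct endpoint { int id; };

struct transport {
    const struct endpoint *ep;  /* owning endpoint (via the asoc) */
    unsigned int laddr_port;
    unsigned int paddr;
};

/* Lookup key: exactly one of .ep / .laddr_port is meaningful. */
struct cmp_arg {
    const struct endpoint *ep;  /* NULL => match by local address */
    unsigned int laddr_port;
    unsigned int paddr;
};

/* Returns 0 on match, mirroring an rhashtable obj_cmpfn. */
static int hash_cmp(const struct cmp_arg *x, const struct transport *t)
{
    if (x->paddr != t->paddr)
        return 1;
    if (x->ep)                               /* ep+paddr lookup */
        return x->ep != t->ep;
    return x->laddr_port != t->laddr_port;   /* laddr+paddr lookup */
}

int main(void)
{
    struct endpoint ep = { .id = 1 };
    struct transport t = { .ep = &ep, .laddr_port = 9000, .paddr = 42 };
    struct cmp_arg by_ep = { .ep = &ep, .paddr = 42 };
    struct cmp_arg by_addr = { .ep = NULL, .laddr_port = 9000, .paddr = 42 };

    printf("by ep: %s\n", hash_cmp(&by_ep, &t) == 0 ? "hit" : "miss");
    printf("by addr: %s\n", hash_cmp(&by_addr, &t) == 0 ? "hit" : "miss");
    return 0;
}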
From 34ae6a1aa0540f0f781dd265366036355fdc8930 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 15 Jan 2016 04:56:56 -0800
Subject: ipv6: update skb->csum when CE mark is propagated

When a tunnel decapsulates the outer header, it has to comply with
RFC 6040 and eventually propagate the CE mark into the inner header.

It turns out IP6_ECN_set_ce() does not correctly update skb->csum for
CHECKSUM_COMPLETE packets, triggering infamous "hw csum failure"
messages and stack traces.

Signed-off-by: Eric Dumazet
Acked-by: Herbert Xu
Signed-off-by: David S. Miller
---
 net/ipv6/xfrm6_mode_tunnel.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'net')

diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index f7fbdbabe50e..372855eeaf42 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -23,7 +23,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
     struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
     if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
-        IP6_ECN_set_ce(inner_iph);
+        IP6_ECN_set_ce(skb, inner_iph);
 }
 
 /* Add encapsulation header.
--
cgit
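Why the skb->csum update matters: with CHECKSUM_COMPLETE the device has
already summed the whole packet, so flipping the two ECN bits in the
inner header must be reflected in the stored sum. One's-complement
arithmetic allows this incrementally: subtract the old 16-bit word, add
the new one, which is what csum_sub()/csum_add() do for skb->csum. A
standalone model with made-up packet bytes:

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over a buffer (a model of skb->csum
 * for CHECKSUM_COMPLETE). */
static uint32_t csum(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;

    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

static uint32_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    /* Start of an IPv6 header: version 6, traffic class ECN = ECT(0). */
    uint8_t pkt[8] = { 0x60, 0x20, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78 };
    uint32_t sum = csum(pkt, sizeof(pkt));

    /* Setting CE changes the 16-bit word holding the traffic class. */
    uint32_t from = (uint32_t)pkt[0] << 8 | pkt[1];
    pkt[1] |= 0x30;                 /* ECN field of the TC becomes CE */
    uint32_t to = (uint32_t)pkt[0] << 8 | pkt[1];

    /* Incremental update: subtract old word, add new one. */
    sum = fold(sum + (uint16_t)~from + to);

    printf("incremental ok: %s\n",
           sum == csum(pkt, sizeof(pkt)) ? "yes" : "no");
    return 0;
}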
From fb3311853c0f23391fc3441d49a46d076de57757 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Fri, 15 Jan 2016 14:44:31 +0100
Subject: net: sctp: Move sequence start handling into sctp_transport_get_idx()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

net/sctp/proc.c: In function ‘sctp_transport_get_idx’:
net/sctp/proc.c:313: warning: ‘obj’ may be used uninitialized in this function

This is currently a false positive, as all callers check for a zero
offset first, and handle this case in the exact same way. Move the
check and handling into sctp_transport_get_idx() to kill the compiler
warning, and avoid future bugs.

Signed-off-by: Geert Uytterhoeven
Acked-by: Marcelo Ricardo Leitner
Signed-off-by: David S. Miller
---
 net/sctp/proc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'net')

diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index dfa7eeccb537..684c5b31563b 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -310,7 +310,7 @@ static struct sctp_transport *sctp_transport_get_next(struct seq_file *seq)
 static struct sctp_transport *sctp_transport_get_idx(struct seq_file *seq,
                                                      loff_t pos)
 {
-    void *obj;
+    void *obj = SEQ_START_TOKEN;
 
     while (pos && (obj = sctp_transport_get_next(seq)) && !IS_ERR(obj))
         pos--;
@@ -347,7 +347,7 @@ static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
     if (err)
         return ERR_PTR(err);
 
-    return *pos ? sctp_transport_get_idx(seq, *pos) : SEQ_START_TOKEN;
+    return sctp_transport_get_idx(seq, *pos);
 }
 
 static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
@@ -462,7 +462,7 @@ static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
     if (err)
         return ERR_PTR(err);
 
-    return *pos ? sctp_transport_get_idx(seq, *pos) : SEQ_START_TOKEN;
+    return sctp_transport_get_idx(seq, *pos);
 }
 
 static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
--
cgit

From c6894dec8ea9ae05747124dce98b3b5c2e69b168 Mon Sep 17 00:00:00 2001
From: Nikolay Aleksandrov
Date: Fri, 15 Jan 2016 19:03:54 +0100
Subject: bridge: fix lockdep addr_list_lock false positive splat

After promisc mode management was introduced, a bridge device could do
dev_set_promiscuity from its ndo_change_rx_flags() callback, which in
turn can be called after the bridge's addr_list_lock has been taken
(e.g. by dev_uc_add). This causes a false positive lockdep splat
because the port interfaces' addr_list_lock is taken while the bridge's
addr_list_lock is already held when br_manage_promisc() runs. To remove
the false positive, introduce a custom bridge addr_list_lock class and
set it on bridge init.

A simple way to reproduce this is with the following:

$ brctl addbr br0
$ ip l add l br0 br0.100 type vlan id 100
$ ip l set br0 up
$ ip l set br0.100 up
$ echo 1 > /sys/class/net/br0/bridge/vlan_filtering
$ brctl addif br0 eth0

Splat:
[   43.684325] =============================================
[   43.684485] [ INFO: possible recursive locking detected ]
[   43.684636] 4.4.0-rc8+ #54 Not tainted
[   43.684755] ---------------------------------------------
[   43.684906] brctl/1187 is trying to acquire lock:
[   43.685047]  (_xmit_ETHER){+.....}, at: [] dev_set_rx_mode+0x1e/0x40
[   43.685460] but task is already holding lock:
[   43.685618]  (_xmit_ETHER){+.....}, at: [] dev_uc_add+0x27/0x80
[   43.686015] other info that might help us debug this:
[   43.686316]  Possible unsafe locking scenario:
[   43.686743]        CPU0
[   43.686967]        ----
[   43.687197]   lock(_xmit_ETHER);
[   43.687544]   lock(_xmit_ETHER);
[   43.687886] *** DEADLOCK ***
[   43.688438]  May be due to missing lock nesting notation
[   43.688882] 2 locks held by brctl/1187:
[   43.689134]  #0:  (rtnl_mutex){+.+.+.}, at: [] rtnl_lock+0x17/0x20
[   43.689852]  #1:  (_xmit_ETHER){+.....}, at: [] dev_uc_add+0x27/0x80
[   43.690575] stack backtrace:
[   43.690970] CPU: 0 PID: 1187 Comm: brctl Not tainted 4.4.0-rc8+ #54
[   43.691270] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.8.1-20150318_183358- 04/01/2014
[   43.691770]  ffffffff826a25c0 ffff8800369fb8e0 ffffffff81360ceb ffffffff826a25c0
[   43.692425]  ffff8800369fb9b8 ffffffff810d0466 ffff8800369fb968 ffffffff81537139
[   43.693071]  ffff88003a08c880 0000000000000000 00000000ffffffff 0000000002080020
[   43.693709] Call Trace:
[   43.693931]  [] dump_stack+0x4b/0x70
[   43.694199]  [] __lock_acquire+0x1e46/0x1e90
[   43.694483]  [] ? netlink_broadcast_filtered+0x139/0x3e0
[   43.694789]  [] ? nlmsg_notify+0x5a/0xc0
[   43.695064]  [] lock_acquire+0xe5/0x1f0
[   43.695340]  [] ? dev_set_rx_mode+0x1e/0x40
[   43.695623]  [] _raw_spin_lock_bh+0x45/0x80
[   43.695901]  [] ? dev_set_rx_mode+0x1e/0x40
[   43.696180]  [] dev_set_rx_mode+0x1e/0x40
[   43.696460]  [] dev_set_promiscuity+0x3c/0x50
[   43.696750]  [] br_port_set_promisc+0x25/0x50 [bridge]
[   43.697052]  [] br_manage_promisc+0x8a/0xe0 [bridge]
[   43.697348]  [] br_dev_change_rx_flags+0x1e/0x20 [bridge]
[   43.697655]  [] __dev_set_promiscuity+0x132/0x1f0
[   43.697943]  [] __dev_set_rx_mode+0x82/0x90
[   43.698223]  [] dev_uc_add+0x5e/0x80
[   43.698498]  [] vlan_device_event+0x542/0x650 [8021q]
[   43.698798]  [] notifier_call_chain+0x5d/0x80
[   43.699083]  [] raw_notifier_call_chain+0x16/0x20
[   43.699374]  [] call_netdevice_notifiers_info+0x6e/0x80
[   43.699678]  [] call_netdevice_notifiers+0x16/0x20
[   43.699973]  [] br_add_if+0x47e/0x4c0 [bridge]
[   43.700259]  [] add_del_if+0x6e/0x80 [bridge]
[   43.700548]  [] br_dev_ioctl+0xaf/0xc0 [bridge]
[   43.700836]  [] dev_ifsioc+0x30c/0x3c0
[   43.701106]  [] dev_ioctl+0xf9/0x6f0
[   43.701379]  [] ? mntput_no_expire+0x5/0x450
[   43.701665]  [] ? mntput_no_expire+0xae/0x450
[   43.701947]  [] sock_do_ioctl+0x42/0x50
[   43.702219]  [] sock_ioctl+0x1e5/0x290
[   43.702500]  [] do_vfs_ioctl+0x2cb/0x5c0
[   43.702771]  [] SyS_ioctl+0x79/0x90
[   43.703033]  [] entry_SYSCALL_64_fastpath+0x16/0x7a

CC: Vlad Yasevich
CC: Stephen Hemminger
CC: Bridge list
CC: Andy Gospodarek
CC: Roopa Prabhu
Fixes: 2796d0c648c9 ("bridge: Automatically manage port promiscuous mode.")
Reported-by: Andy Gospodarek
Signed-off-by: Nikolay Aleksandrov
Signed-off-by: David S. Miller
---
 net/bridge/br_device.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'net')

diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 5e88d3e17546..2c8095a5d824 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -28,6 +28,8 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
+static struct lock_class_key bridge_netdev_addr_lock_key;
+
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -87,6 +89,11 @@ out:
     return NETDEV_TX_OK;
 }
 
+static void br_set_lockdep_class(struct net_device *dev)
+{
+    lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
+}
+
 static int br_dev_init(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
@@ -99,6 +106,7 @@ static int br_dev_init(struct net_device *dev)
     err = br_vlan_init(br);
     if (err)
         free_percpu(br->stats);
+    br_set_lockdep_class(dev);
 
     return err;
 }
--
cgit
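The same idiom applies to any stacked device whose addr_list_lock may be
taken while a lower device's lock of the default class is held. A sketch
of the pattern for a hypothetical stacked driver; only lockdep_set_class()
and struct lock_class_key are the real kernel API, the rest is invented:

#include <linux/lockdep.h>
#include <linux/netdevice.h>

/* One static key per driver gives all of its upper devices a lock
 * class distinct from the default per-type (_xmit_ETHER) class. */
static struct lock_class_key myupper_addr_list_lock_key;

static int myupper_dev_init(struct net_device *dev)
{
    /* Without this, taking dev->addr_list_lock while a lower
     * device's addr_list_lock is held looks, to lockdep, like
     * recursion on a single class and triggers the splat above. */
    lockdep_set_class(&dev->addr_list_lock,
                      &myupper_addr_list_lock_key);
    return 0;
}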