Diffstat (limited to 'net/bridge')
30 files changed, 804 insertions, 356 deletions
diff --git a/net/bridge/br.c b/net/bridge/br.c index ac19b797dbec..0adeafe11a36 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -51,6 +51,13 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v } } + if (is_vlan_dev(dev)) { + struct net_device *real_dev = vlan_dev_real_dev(dev); + + if (netif_is_bridge_master(real_dev)) + br_vlan_vlan_upper_event(real_dev, dev, event); + } + /* not a port of a bridge */ p = br_port_get_rtnl(dev); if (!p) @@ -277,6 +284,9 @@ int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on, case BR_BOOLOPT_MST_ENABLE: err = br_mst_set_enabled(br, on, extack); break; + case BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION: + br_opt_toggle(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION, on); + break; default: /* shouldn't be called with unsupported options */ WARN_ON(1); @@ -295,6 +305,8 @@ int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt) return br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED); case BR_BOOLOPT_MST_ENABLE: return br_opt_get(br, BROPT_MST_ENABLED); + case BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION: + return br_opt_get(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION); default: /* shouldn't be called with unsupported options */ WARN_ON(1); @@ -356,26 +368,20 @@ void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on) clear_bit(opt, &br->options); } -static void __net_exit br_net_exit_batch(struct list_head *net_list) +static void __net_exit br_net_exit_rtnl(struct net *net, + struct list_head *dev_to_kill) { struct net_device *dev; - struct net *net; - LIST_HEAD(list); - - rtnl_lock(); - - list_for_each_entry(net, net_list, exit_list) - for_each_netdev(net, dev) - if (netif_is_bridge_master(dev)) - br_dev_delete(dev, &list); - unregister_netdevice_many(&list); + ASSERT_RTNL_NET(net); - rtnl_unlock(); + for_each_netdev(net, dev) + if (netif_is_bridge_master(dev)) + br_dev_delete(dev, dev_to_kill); } static struct pernet_operations br_net_ops = { - .exit_batch = br_net_exit_batch, + .exit_rtnl = br_net_exit_rtnl, }; static const struct stp_proto br_stp_proto = { diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c index c7869a286df4..1e2b51769eec 100644 --- a/net/bridge/br_arp_nd_proxy.c +++ b/net/bridge/br_arp_nd_proxy.c @@ -160,6 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) { if (br_is_neigh_suppress_enabled(p, vid)) return; + if (is_unicast_ether_addr(eth_hdr(skb)->h_dest) && + parp->ar_op == htons(ARPOP_REQUEST)) + return; if (parp->ar_op != htons(ARPOP_RREQUEST) && parp->ar_op != htons(ARPOP_RREPLY) && (ipv4_is_zeronet(sip) || sip == tip)) { @@ -229,7 +232,7 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, #endif #if IS_ENABLED(CONFIG_IPV6) -struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *msg) +struct nd_msg *br_is_nd_neigh_msg(const struct sk_buff *skb, struct nd_msg *msg) { struct nd_msg *m; @@ -410,6 +413,10 @@ void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, if (br_is_neigh_suppress_enabled(p, vid)) return; + if (is_unicast_ether_addr(eth_hdr(skb)->h_dest) && + msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) + return; + if (msg->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT && !msg->icmph.icmp6_solicited) { /* prevent flooding to neigh suppress ports */ diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 65cee0ad3c1b..a818fdc22da9 100644 --- a/net/bridge/br_device.c +++ 
b/net/bridge/br_device.c @@ -16,6 +16,8 @@ #include <linux/netfilter_bridge.h> #include <linux/uaccess.h> +#include <net/netdev_lock.h> + #include "br_private.h" #define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \ @@ -27,6 +29,7 @@ EXPORT_SYMBOL_GPL(nf_br_ops); /* net device transmit always called with BH disabled */ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { + enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN); struct net_bridge_mcast_port *pmctx_null = NULL; struct net_bridge *br = netdev_priv(dev); struct net_bridge_mcast *brmctx = &br->multicast_ctx; @@ -38,6 +41,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) const unsigned char *dest; u16 vid = 0; + if (unlikely(reason != SKB_NOT_DROPPED_YET)) { + kfree_skb_reason(skb, reason); + return NETDEV_TX_OK; + } + memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); br_tc_skb_miss_set(skb, false); @@ -108,38 +116,23 @@ out: return NETDEV_TX_OK; } -static struct lock_class_key bridge_netdev_addr_lock_key; - -static void br_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key); -} - static int br_dev_init(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); int err; - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; - err = br_fdb_hash_init(br); - if (err) { - free_percpu(dev->tstats); + if (err) return err; - } err = br_mdb_hash_init(br); if (err) { - free_percpu(dev->tstats); br_fdb_hash_fini(br); return err; } err = br_vlan_init(br); if (err) { - free_percpu(dev->tstats); br_mdb_hash_fini(br); br_fdb_hash_fini(br); return err; @@ -147,14 +140,14 @@ static int br_dev_init(struct net_device *dev) err = br_multicast_init_stats(br); if (err) { - free_percpu(dev->tstats); br_vlan_flush(br); br_mdb_hash_fini(br); br_fdb_hash_fini(br); + return err; } - br_set_lockdep_class(dev); - return err; + netdev_lockdep_set_classes(dev); + return 0; } static void br_dev_uninit(struct net_device *dev) @@ -166,7 +159,6 @@ static void br_dev_uninit(struct net_device *dev) br_vlan_flush(br); br_mdb_hash_fini(br); br_fdb_hash_fini(br); - free_percpu(dev->tstats); } static int br_dev_open(struct net_device *dev) @@ -213,7 +205,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu) { struct net_bridge *br = netdev_priv(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); /* this flag will be cleared if the MTU was automatically adjusted */ br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true); @@ -338,7 +330,7 @@ int br_netpoll_enable(struct net_bridge_port *p) return __br_netpoll_enable(p); } -static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int br_netpoll_setup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p; @@ -405,7 +397,7 @@ static int br_fill_forward_path(struct net_device_path_ctx *ctx, br_vlan_fill_forward_path_pvid(br, ctx, path); f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id); - if (!f || !f->dst) + if (!f) return -1; dst = READ_ONCE(f->dst); @@ -481,7 +473,7 @@ static const struct net_device_ops br_netdev_ops = { .ndo_fill_forward_path = br_fill_forward_path, }; -static struct device_type br_type = { +static const struct device_type br_type = { .name = "bridge", }; @@ -497,12 +489,15 @@ void br_dev_setup(struct net_device *dev) dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE 
| IFF_NO_QUEUE; + dev->lltx = true; + dev->netns_immutable = true; - dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; + dev->features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->vlan_features = COMMON_FEATURES; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; br->dev = dev; spin_lock_init(&br->lock); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index c622de5eccd0..902694c0ce64 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -18,7 +18,7 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/atomic.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/if_vlan.h> #include <net/switchdev.h> #include <trace/events/bridge.h> @@ -35,10 +35,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly; int __init br_fdb_init(void) { - br_fdb_cache = kmem_cache_create("bridge_fdb_cache", - sizeof(struct net_bridge_fdb_entry), - 0, - SLAB_HWCACHE_ALIGN, NULL); + br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN); if (!br_fdb_cache) return -ENOMEM; @@ -76,13 +73,6 @@ static inline int has_expired(const struct net_bridge *br, time_before_eq(fdb->updated + hold_time(br), jiffies); } -static void fdb_rcu_free(struct rcu_head *head) -{ - struct net_bridge_fdb_entry *ent - = container_of(head, struct net_bridge_fdb_entry, rcu); - kmem_cache_free(br_fdb_cache, ent); -} - static int fdb_to_nud(const struct net_bridge *br, const struct net_bridge_fdb_entry *fdb) { @@ -332,7 +322,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f, if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags)) atomic_dec(&br->fdb_n_learned); fdb_notify(br, f, RTM_DELNEIGH, swdev_notify); - call_rcu(&f->rcu, fdb_rcu_free); + kfree_rcu(f, rcu); } /* Delete a local entry if no other port had the same address. @@ -965,6 +955,7 @@ int br_fdb_dump(struct sk_buff *skb, struct net_device *filter_dev, int *idx) { + struct ndo_fdb_dump_context *ctx = (void *)cb->ctx; struct net_bridge *br = netdev_priv(dev); struct net_bridge_fdb_entry *f; int err = 0; @@ -980,7 +971,7 @@ int br_fdb_dump(struct sk_buff *skb, rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { - if (*idx < cb->args[2]) + if (*idx < ctx->fdb_idx) goto skip; if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) { if (filter_dev != dev) @@ -1162,7 +1153,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[], - struct netlink_ext_ack *extack) + bool *notified, struct netlink_ext_ack *extack) { int err = 0; @@ -1193,6 +1184,8 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, spin_unlock_bh(&br->hash_lock); } + if (!err) + *notified = true; return err; } @@ -1205,7 +1198,7 @@ static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, - struct netlink_ext_ack *extack) + bool *notified, struct netlink_ext_ack *extack) { struct nlattr *nfea_tb[NFEA_MAX + 1], *attr; struct net_bridge_vlan_group *vg; @@ -1268,10 +1261,10 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* VID was specified, so use it. 
*/ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb, - extack); + notified, extack); } else { err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb, - extack); + notified, extack); if (err || !vg || !vg->num_vlans) goto out; @@ -1283,7 +1276,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (!br_vlan_should_use(v)) continue; err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, - nfea_tb, extack); + nfea_tb, notified, extack); if (err) goto out; } @@ -1295,7 +1288,7 @@ out: static int fdb_delete_by_addr_and_port(struct net_bridge *br, const struct net_bridge_port *p, - const u8 *addr, u16 vlan) + const u8 *addr, u16 vlan, bool *notified) { struct net_bridge_fdb_entry *fdb; @@ -1304,18 +1297,19 @@ static int fdb_delete_by_addr_and_port(struct net_bridge *br, return -ENOENT; fdb_delete(br, fdb, true); + *notified = true; return 0; } static int __br_fdb_delete(struct net_bridge *br, const struct net_bridge_port *p, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, bool *notified) { int err; spin_lock_bh(&br->hash_lock); - err = fdb_delete_by_addr_and_port(br, p, addr, vid); + err = fdb_delete_by_addr_and_port(br, p, addr, vid, notified); spin_unlock_bh(&br->hash_lock); return err; @@ -1324,12 +1318,11 @@ static int __br_fdb_delete(struct net_bridge *br, /* Remove neighbor entry with RTM_DELNEIGH */ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid, + const unsigned char *addr, u16 vid, bool *notified, struct netlink_ext_ack *extack) { struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; - struct net_bridge_vlan *v; struct net_bridge *br; int err; @@ -1348,23 +1341,19 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], } if (vid) { - v = br_vlan_find(vg, vid); - if (!v) { - pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name); - return -EINVAL; - } - - err = __br_fdb_delete(br, p, addr, vid); + err = __br_fdb_delete(br, p, addr, vid, notified); } else { + struct net_bridge_vlan *v; + err = -ENOENT; - err &= __br_fdb_delete(br, p, addr, 0); + err &= __br_fdb_delete(br, p, addr, 0, notified); if (!vg || !vg->num_vlans) return err; list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - err &= __br_fdb_delete(br, p, addr, v->vid); + err &= __br_fdb_delete(br, p, addr, v->vid, notified); } } @@ -1472,12 +1461,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, modified = true; } - if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { + if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { /* Refresh entry */ fdb->used = jiffies; - } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) { - /* Take over SW learned entry */ - set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); + } else { modified = true; } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 7431f89e897b..29097e984b4f 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p, vg = nbp_vlan_group_rcu(p); return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && - p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) && - nbp_switchdev_allowed_egress(p, skb) && + (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && + br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && !br_skb_isolated(p, skb); } @@ -201,6 +201,7 @@ void br_flood(struct 
net_bridge *br, struct sk_buff *skb, enum br_pkt_type pkt_type, bool local_rcv, bool local_orig, u16 vid) { + enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET; struct net_bridge_port *prev = NULL; struct net_bridge_port *p; @@ -234,8 +235,11 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, continue; prev = maybe_deliver(prev, p, skb, local_orig); - if (IS_ERR(prev)) + if (IS_ERR(prev)) { + reason = PTR_ERR(prev) == -ENOMEM ? SKB_DROP_REASON_NOMEM : + SKB_DROP_REASON_NOT_SPECIFIED; goto out; + } } if (!prev) @@ -249,7 +253,7 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, out: if (!local_rcv) - kfree_skb(skb); + kfree_skb_reason(skb, reason); } #ifdef CONFIG_BRIDGE_IGMP_SNOOPING @@ -258,6 +262,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, { struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; const unsigned char *src = eth_hdr(skb)->h_source; + struct sk_buff *nskb; if (!should_deliver(p, skb)) return; @@ -266,12 +271,16 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, if (skb->dev == p->dev && ether_addr_equal(src, addr)) return; - skb = skb_copy(skb, GFP_ATOMIC); - if (!skb) { + __skb_push(skb, ETH_HLEN); + nskb = pskb_copy(skb, GFP_ATOMIC); + __skb_pull(skb, ETH_HLEN); + if (!nskb) { DEV_STATS_INC(dev, tx_dropped); return; } + skb = nskb; + __skb_pull(skb, ETH_HLEN); if (!is_broadcast_ether_addr(addr)) memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); @@ -284,6 +293,7 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst, struct net_bridge_mcast *brmctx, bool local_rcv, bool local_orig) { + enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET; struct net_bridge_port *prev = NULL; struct net_bridge_port_group *p; bool allow_mode_include = true; @@ -324,8 +334,11 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst, } prev = maybe_deliver(prev, port, skb, local_orig); - if (IS_ERR(prev)) + if (IS_ERR(prev)) { + reason = PTR_ERR(prev) == -ENOMEM ? 
SKB_DROP_REASON_NOMEM : + SKB_DROP_REASON_NOT_SPECIFIED; goto out; + } delivered: if ((unsigned long)lport >= (unsigned long)port) p = rcu_dereference(p->next); @@ -344,6 +357,6 @@ delivered: out: if (!local_rcv) - kfree_skb(skb); + kfree_skb_reason(skb, reason); } #endif diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index f21097e73482..5f6ac9bf1527 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) return netif_receive_skb(skb); } -static int br_pass_frame_up(struct sk_buff *skb) +static int br_pass_frame_up(struct sk_buff *skb, bool promisc) { struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; struct net_bridge *br = netdev_priv(brdev); @@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb) br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb), BR_MCAST_DIR_TX); + BR_INPUT_SKB_CB(skb)->promisc = promisc; + return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(indev), NULL, skb, indev, NULL, br_netif_receive_skb); @@ -73,6 +75,7 @@ static int br_pass_frame_up(struct sk_buff *skb) /* note: already called with rcu_read_lock */ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct net_bridge_port *p = br_port_get_rcu(skb->dev); enum br_pkt_type pkt_type = BR_PKT_UNICAST; struct net_bridge_fdb_entry *dst = NULL; @@ -82,6 +85,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb struct net_bridge_mcast *brmctx; struct net_bridge_vlan *vlan; struct net_bridge *br; + bool promisc; u16 vid = 0; u8 state; @@ -93,8 +97,10 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (br_mst_is_enabled(br)) { state = BR_STATE_FORWARDING; } else { - if (p->state == BR_STATE_DISABLED) + if (p->state == BR_STATE_DISABLED) { + reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE; goto drop; + } state = p->state; } @@ -137,7 +143,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if (p->flags & BR_LEARNING) br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0); - local_rcv = !!(br->dev->flags & IFF_PROMISC); + promisc = !!(br->dev->flags & IFF_PROMISC); + local_rcv = promisc; + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) { /* by definition the broadcast is also a multicast address */ if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) { @@ -150,8 +158,10 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb } } - if (state == BR_STATE_LEARNING) + if (state == BR_STATE_LEARNING) { + reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE; goto drop; + } BR_INPUT_SKB_CB(skb)->brdev = br->dev; BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED); @@ -179,7 +189,8 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) { if ((mdst && mdst->host_joined) || - br_multicast_is_router(brmctx, skb)) { + br_multicast_is_router(brmctx, skb) || + br->dev->flags & IFF_ALLMULTI) { local_rcv = true; DEV_STATS_INC(br->dev, multicast); } @@ -200,7 +211,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb unsigned long now = jiffies; if (test_bit(BR_FDB_LOCAL, &dst->flags)) - return br_pass_frame_up(skb); + return br_pass_frame_up(skb, false); if (now != dst->used) dst->used = 
now; @@ -213,12 +224,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb } if (local_rcv) - return br_pass_frame_up(skb); + return br_pass_frame_up(skb, promisc); out: return 0; drop: - kfree_skb(skb); + kfree_skb_reason(skb, reason); goto out; } EXPORT_SYMBOL_GPL(br_handle_frame_finish); @@ -319,6 +330,7 @@ static int br_process_frame_type(struct net_bridge_port *p, */ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct net_bridge_port *p; struct sk_buff *skb = *pskb; const unsigned char *dest = eth_hdr(skb)->h_dest; @@ -326,8 +338,10 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) return RX_HANDLER_PASS; - if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) + if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) { + reason = SKB_DROP_REASON_MAC_INVALID_SOURCE; goto drop; + } skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) @@ -369,6 +383,7 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) return RX_HANDLER_PASS; case 0x01: /* IEEE MAC (Pause) */ + reason = SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL; goto drop; case 0x0E: /* 802.1AB LLDP */ @@ -386,6 +401,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) goto forward; } + BR_INPUT_SKB_CB(skb)->promisc = false; + /* The else clause should be hit when nf_hook(): * - returns < 0 (drop/error) * - returns = 0 (stolen/nf_queue) @@ -416,8 +433,9 @@ defer_stp_filtering: return nf_hook_bridge_pre(skb, pskb); default: + reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE; drop: - kfree_skb(skb); + kfree_skb_reason(skb, reason); } return RX_HANDLER_CONSUMED; } diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index f213ed108361..6bc0a11f2ed3 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -394,10 +394,26 @@ static int old_deviceless(struct net *net, void __user *data) return -EOPNOTSUPP; } -int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg) +int br_ioctl_stub(struct net *net, unsigned int cmd, void __user *uarg) { int ret = -EOPNOTSUPP; + struct ifreq ifr; + + if (cmd == SIOCBRADDIF || cmd == SIOCBRDELIF) { + void __user *data; + char *colon; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + if (get_user_ifreq(&ifr, &data, uarg)) + return -EFAULT; + + ifr.ifr_name[IFNAMSIZ - 1] = 0; + colon = strchr(ifr.ifr_name, ':'); + if (colon) + *colon = 0; + } rtnl_lock(); @@ -430,7 +446,21 @@ int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, break; case SIOCBRADDIF: case SIOCBRDELIF: - ret = add_del_if(br, ifr->ifr_ifindex, cmd == SIOCBRADDIF); + { + struct net_device *dev; + + dev = __dev_get_by_name(net, ifr.ifr_name); + if (!dev || !netif_device_present(dev)) { + ret = -ENODEV; + break; + } + if (!netif_is_bridge_master(dev)) { + ret = -EOPNOTSUPP; + break; + } + + ret = add_del_if(netdev_priv(dev), ifr.ifr_ifindex, cmd == SIOCBRADDIF); + } break; } diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index bc37e47ad829..400eb872b403 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -144,6 +144,8 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags) e->flags |= MDB_FLAGS_STAR_EXCL; if (flags & MDB_PG_FLAGS_BLOCKED) e->flags |= MDB_FLAGS_BLOCKED; + if (flags & MDB_PG_FLAGS_OFFLOAD_FAILED) + e->flags |= MDB_FLAGS_OFFLOAD_FAILED; } static void 
__mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip, @@ -517,16 +519,17 @@ static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg) rtnl_mdb_nlmsg_pg_size(pg); } -void br_mdb_notify(struct net_device *dev, - struct net_bridge_mdb_entry *mp, - struct net_bridge_port_group *pg, - int type) +static void __br_mdb_notify(struct net_device *dev, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg, + int type, bool notify_switchdev) { struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOBUFS; - br_switchdev_mdb_notify(dev, mp, pg, type); + if (notify_switchdev) + br_switchdev_mdb_notify(dev, mp, pg, type); skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC); if (!skb) @@ -544,6 +547,21 @@ errout: rtnl_set_sk_err(net, RTNLGRP_MDB, err); } +void br_mdb_notify(struct net_device *dev, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg, + int type) +{ + __br_mdb_notify(dev, mp, pg, type, true); +} + +void br_mdb_flag_change_notify(struct net_device *dev, + struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg) +{ + __br_mdb_notify(dev, mp, pg, RTM_NEWMDB, false); +} + static int nlmsg_populate_rtr_fill(struct sk_buff *skb, struct net_device *dev, int ifindex, u16 vid, u32 pid, @@ -732,7 +750,7 @@ static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg, mod_timer(&pg->timer, now + brmctx->multicast_membership_interval); else - del_timer(&pg->timer); + timer_delete(&pg->timer); br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB); @@ -853,7 +871,7 @@ static int br_mdb_add_group_src(const struct br_mdb_config *cfg, cfg->entry->state == MDB_TEMPORARY) mod_timer(&ent->timer, now + br_multicast_gmi(brmctx)); else - del_timer(&ent->timer); + timer_delete(&ent->timer); /* Install a (S, G) forwarding entry for the source. 
*/ err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack); @@ -953,7 +971,7 @@ static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg, mod_timer(&pg->timer, now + brmctx->multicast_membership_interval); else - del_timer(&pg->timer); + timer_delete(&pg->timer); br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB); @@ -1040,7 +1058,7 @@ static int br_mdb_add_group(const struct br_mdb_config *cfg, /* host join */ if (!port) { - if (mp->host_joined) { + if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) { NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host"); return -EEXIST; } @@ -1674,7 +1692,7 @@ int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq, spin_lock_bh(&br->multicast_lock); mp = br_mdb_ip_get(br, &group); - if (!mp) { + if (!mp || (!mp->ports && !mp->host_joined)) { NL_SET_ERR_MSG_MOD(extack, "MDB entry not found"); err = -ENOENT; goto unlock; diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index ee680adcee17..3f24b4ee49c2 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -73,18 +73,17 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state) } EXPORT_SYMBOL_GPL(br_mst_get_state); -static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v, +static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg, + struct net_bridge_vlan *v, u8 state) { - struct net_bridge_vlan_group *vg = nbp_vlan_group(p); - - if (v->state == state) + if (br_vlan_get_state(v) == state) return; - br_vlan_set_state(v, state); - if (v->vid == vg->pvid) br_vlan_set_pvid_state(vg, state); + + br_vlan_set_state(v, state); } int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, @@ -100,11 +99,12 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, }; struct net_bridge_vlan_group *vg; struct net_bridge_vlan *v; - int err; + int err = 0; - vg = nbp_vlan_group(p); + rcu_read_lock(); + vg = nbp_vlan_group_rcu(p); if (!vg) - return 0; + goto out; /* MSTI 0 (CST) state changes are notified via the regular * SWITCHDEV_ATTR_ID_PORT_STP_STATE. @@ -112,17 +112,20 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, if (msti) { err = switchdev_port_attr_set(p->dev, &attr, extack); if (err && err != -EOPNOTSUPP) - return err; + goto out; } - list_for_each_entry(v, &vg->vlan_list, vlist) { + err = 0; + list_for_each_entry_rcu(v, &vg->vlan_list, vlist) { if (v->brvlan->msti != msti) continue; - br_mst_vlan_set_state(p, v, state); + br_mst_vlan_set_state(vg, v, state); } - return 0; +out: + rcu_read_unlock(); + return err; } static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti) @@ -136,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti) * it. */ if (v != pv && v->brvlan->msti == msti) { - br_mst_vlan_set_state(pv->port, pv, v->state); + br_mst_vlan_set_state(vg, pv, v->state); return; } } /* Otherwise, start out in a new MSTI with all ports disabled. 
*/ - return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED); + return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED); } int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index d7d021af1029..0224ef3dfec0 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -546,7 +546,7 @@ static void br_multicast_fwd_src_add(struct net_bridge_group_src *src) return; /* the kernel is now responsible for removing this S,G */ - del_timer(&sg->timer); + timer_delete(&sg->timer); star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr); if (!star_mp) return; @@ -648,7 +648,7 @@ static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp) static void br_multicast_group_expired(struct timer_list *t) { - struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer); + struct net_bridge_mdb_entry *mp = timer_container_of(mp, t, timer); struct net_bridge *br = mp->br; spin_lock(&br->multicast_lock); @@ -856,7 +856,7 @@ static void br_multicast_find_del_pg(struct net_bridge *br, static void br_multicast_port_group_expired(struct timer_list *t) { - struct net_bridge_port_group *pg = from_timer(pg, t, timer); + struct net_bridge_port_group *pg = timer_container_of(pg, t, timer); struct net_bridge_group_src *src_ent; struct net_bridge *br = pg->key.port->br; struct hlist_node *tmp; @@ -1314,7 +1314,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br, static void br_multicast_group_src_expired(struct timer_list *t) { - struct net_bridge_group_src *src = from_timer(src, t, timer); + struct net_bridge_group_src *src = timer_container_of(src, t, timer); struct net_bridge_port_group *pg; struct net_bridge *br = src->br; @@ -1667,8 +1667,8 @@ out: static void br_ip4_multicast_router_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip4_mc_router_timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip4_mc_router_timer); br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist); } @@ -1676,8 +1676,8 @@ static void br_ip4_multicast_router_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_router_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip6_mc_router_timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip6_mc_router_timer); br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist); } @@ -1713,8 +1713,8 @@ out: static void br_ip4_multicast_local_router_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip4_mc_router_timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip4_mc_router_timer); br_multicast_local_router_expired(brmctx, t); } @@ -1722,8 +1722,8 @@ static void br_ip4_multicast_local_router_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_local_router_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip6_mc_router_timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip6_mc_router_timer); br_multicast_local_router_expired(brmctx, t); } @@ -1746,8 +1746,8 @@ out: static void br_ip4_multicast_querier_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip4_other_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip4_other_query.timer); br_multicast_querier_expired(brmctx, 
&brmctx->ip4_own_query); } @@ -1755,13 +1755,17 @@ static void br_ip4_multicast_querier_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_querier_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip6_other_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip6_other_query.timer); br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query); } #endif +static void br_multicast_query_delay_expired(struct timer_list *t) +{ +} + static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx, struct br_ip *ip, struct sk_buff *skb) @@ -1914,8 +1918,8 @@ out: static void br_ip4_multicast_port_query_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip4_own_query.timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip4_own_query.timer); br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query); } @@ -1923,8 +1927,8 @@ static void br_ip4_multicast_port_query_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_port_query_expired(struct timer_list *t) { - struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t, - ip6_own_query.timer); + struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t, + ip6_own_query.timer); br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query); } @@ -1932,7 +1936,8 @@ static void br_ip6_multicast_port_query_expired(struct timer_list *t) static void br_multicast_port_group_rexmit(struct timer_list *t) { - struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer); + struct net_bridge_port_group *pg = timer_container_of(pg, t, + rexmit_timer); struct bridge_mcast_other_query *other_query = NULL; struct net_bridge *br = pg->key.port->br; struct net_bridge_mcast_port *pmctx; @@ -2011,9 +2016,9 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port, void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx) { #if IS_ENABLED(CONFIG_IPV6) - del_timer_sync(&pmctx->ip6_mc_router_timer); + timer_delete_sync(&pmctx->ip6_mc_router_timer); #endif - del_timer_sync(&pmctx->ip4_mc_router_timer); + timer_delete_sync(&pmctx->ip4_mc_router_timer); } int br_multicast_add_port(struct net_bridge_port *port) @@ -2041,16 +2046,14 @@ void br_multicast_del_port(struct net_bridge_port *port) { struct net_bridge *br = port->br; struct net_bridge_port_group *pg; - HLIST_HEAD(deleted_head); struct hlist_node *n; /* Take care of the remaining groups, only perm ones should be left */ spin_lock_bh(&br->multicast_lock); hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) br_multicast_find_del_pg(br, pg); - hlist_move_list(&br->mcast_gc_list, &deleted_head); spin_unlock_bh(&br->multicast_lock); - br_multicast_gc(&deleted_head); + flush_work(&br->mcast_gc_work); br_multicast_port_ctx_deinit(&port->multicast_ctx); free_percpu(port->mcast_stats); } @@ -2059,8 +2062,8 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query) { query->startup_sent = 0; - if (try_to_del_timer_sync(&query->timer) >= 0 || - del_timer(&query->timer)) + if (timer_delete_sync_try(&query->timer) >= 0 || + timer_delete(&query->timer)) mod_timer(&query->timer, jiffies); } @@ -2103,12 +2106,17 @@ static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx) } } -void br_multicast_enable_port(struct net_bridge_port *port) +static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx) { - struct net_bridge *br = 
port->br; + struct net_bridge *br = pmctx->port->br; spin_lock_bh(&br->multicast_lock); - __br_multicast_enable_port_ctx(&port->multicast_ctx); + if (br_multicast_port_ctx_is_vlan(pmctx) && + !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) { + spin_unlock_bh(&br->multicast_lock); + return; + } + __br_multicast_enable_port_ctx(pmctx); spin_unlock_bh(&br->multicast_lock); } @@ -2125,21 +2133,77 @@ static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx) br_multicast_find_del_pg(pmctx->port->br, pg); del |= br_ip4_multicast_rport_del(pmctx); - del_timer(&pmctx->ip4_mc_router_timer); - del_timer(&pmctx->ip4_own_query.timer); + timer_delete(&pmctx->ip4_mc_router_timer); + timer_delete(&pmctx->ip4_own_query.timer); del |= br_ip6_multicast_rport_del(pmctx); #if IS_ENABLED(CONFIG_IPV6) - del_timer(&pmctx->ip6_mc_router_timer); - del_timer(&pmctx->ip6_own_query.timer); + timer_delete(&pmctx->ip6_mc_router_timer); + timer_delete(&pmctx->ip6_own_query.timer); #endif br_multicast_rport_del_notify(pmctx, del); } +static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx) +{ + struct net_bridge *br = pmctx->port->br; + + spin_lock_bh(&br->multicast_lock); + if (br_multicast_port_ctx_is_vlan(pmctx) && + !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) { + spin_unlock_bh(&br->multicast_lock); + return; + } + + __br_multicast_disable_port_ctx(pmctx); + spin_unlock_bh(&br->multicast_lock); +} + +static void br_multicast_toggle_port(struct net_bridge_port *port, bool on) +{ +#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) + if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) { + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *vlan; + + rcu_read_lock(); + vg = nbp_vlan_group_rcu(port); + if (!vg) { + rcu_read_unlock(); + return; + } + + /* iterate each vlan, toggle vlan multicast context */ + list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) { + struct net_bridge_mcast_port *pmctx = + &vlan->port_mcast_ctx; + u8 state = br_vlan_get_state(vlan); + /* enable vlan multicast context when state is + * LEARNING or FORWARDING + */ + if (on && br_vlan_state_allowed(state, true)) + br_multicast_enable_port_ctx(pmctx); + else + br_multicast_disable_port_ctx(pmctx); + } + rcu_read_unlock(); + return; + } +#endif + /* toggle port multicast context when vlan snooping is disabled */ + if (on) + br_multicast_enable_port_ctx(&port->multicast_ctx); + else + br_multicast_disable_port_ctx(&port->multicast_ctx); +} + +void br_multicast_enable_port(struct net_bridge_port *port) +{ + br_multicast_toggle_port(port, true); +} + void br_multicast_disable_port(struct net_bridge_port *port) { - spin_lock_bh(&port->br->multicast_lock); - __br_multicast_disable_port_ctx(&port->multicast_ctx); - spin_unlock_bh(&port->br->multicast_lock); + br_multicast_toggle_port(port, false); } static int __grp_src_delete_marked(struct net_bridge_port_group *pg) @@ -3198,7 +3262,7 @@ br_multicast_update_query_timer(struct net_bridge_mcast *brmctx, unsigned long max_delay) { if (!timer_pending(&query->timer)) - query->delay_time = jiffies + max_delay; + mod_timer(&query->delay_timer, jiffies + max_delay); mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval); } @@ -3478,7 +3542,7 @@ static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx, if (mp->host_joined && (timer_pending(&mp->timer) ? 
time_after(mp->timer.expires, now + max_delay) : - try_to_del_timer_sync(&mp->timer) >= 0)) + timer_delete_sync_try(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; @@ -3486,7 +3550,7 @@ static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx, pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : - try_to_del_timer_sync(&p->timer) >= 0 && + timer_delete_sync_try(&p->timer) >= 0 && (brmctx->multicast_igmp_version == 2 || p->filter_mode == MCAST_EXCLUDE)) mod_timer(&p->timer, now + max_delay); @@ -3567,7 +3631,7 @@ static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx, if (mp->host_joined && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : - try_to_del_timer_sync(&mp->timer) >= 0)) + timer_delete_sync_try(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; @@ -3575,7 +3639,7 @@ static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx, pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : - try_to_del_timer_sync(&p->timer) >= 0 && + timer_delete_sync_try(&p->timer) >= 0 && (brmctx->multicast_mld_version == 1 || p->filter_mode == MCAST_EXCLUDE)) mod_timer(&p->timer, now + max_delay); @@ -3647,7 +3711,7 @@ br_multicast_leave_group(struct net_bridge_mcast *brmctx, if (!hlist_unhashed(&p->mglist) && (timer_pending(&p->timer) ? time_after(p->timer.expires, time) : - try_to_del_timer_sync(&p->timer) >= 0)) { + timer_delete_sync_try(&p->timer) >= 0)) { mod_timer(&p->timer, time); } @@ -3663,7 +3727,7 @@ br_multicast_leave_group(struct net_bridge_mcast *brmctx, if (mp->host_joined && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, time) : - try_to_del_timer_sync(&mp->timer) >= 0)) { + timer_delete_sync_try(&mp->timer) >= 0)) { mod_timer(&mp->timer, time); } @@ -3679,7 +3743,7 @@ br_multicast_leave_group(struct net_bridge_mcast *brmctx, if (!hlist_unhashed(&p->mglist) && (timer_pending(&p->timer) ? 
time_after(p->timer.expires, time) : - try_to_del_timer_sync(&p->timer) >= 0)) { + timer_delete_sync_try(&p->timer) >= 0)) { mod_timer(&p->timer, time); } @@ -3993,8 +4057,8 @@ out: static void br_ip4_multicast_query_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip4_own_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip4_own_query.timer); br_multicast_query_expired(brmctx, &brmctx->ip4_own_query, &brmctx->ip4_querier); @@ -4003,8 +4067,8 @@ static void br_ip4_multicast_query_expired(struct timer_list *t) #if IS_ENABLED(CONFIG_IPV6) static void br_ip6_multicast_query_expired(struct timer_list *t) { - struct net_bridge_mcast *brmctx = from_timer(brmctx, t, - ip6_own_query.timer); + struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t, + ip6_own_query.timer); br_multicast_query_expired(brmctx, &brmctx->ip6_own_query, &brmctx->ip6_querier); @@ -4041,13 +4105,11 @@ void br_multicast_ctx_init(struct net_bridge *br, brmctx->multicast_querier_interval = 255 * HZ; brmctx->multicast_membership_interval = 260 * HZ; - brmctx->ip4_other_query.delay_time = 0; brmctx->ip4_querier.port_ifidx = 0; seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock); brmctx->multicast_igmp_version = 2; #if IS_ENABLED(CONFIG_IPV6) brmctx->multicast_mld_version = 1; - brmctx->ip6_other_query.delay_time = 0; brmctx->ip6_querier.port_ifidx = 0; seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock); #endif @@ -4056,6 +4118,8 @@ void br_multicast_ctx_init(struct net_bridge *br, br_ip4_multicast_local_router_expired, 0); timer_setup(&brmctx->ip4_other_query.timer, br_ip4_multicast_querier_expired, 0); + timer_setup(&brmctx->ip4_other_query.delay_timer, + br_multicast_query_delay_expired, 0); timer_setup(&brmctx->ip4_own_query.timer, br_ip4_multicast_query_expired, 0); #if IS_ENABLED(CONFIG_IPV6) @@ -4063,6 +4127,8 @@ void br_multicast_ctx_init(struct net_bridge *br, br_ip6_multicast_local_router_expired, 0); timer_setup(&brmctx->ip6_other_query.timer, br_ip6_multicast_querier_expired, 0); + timer_setup(&brmctx->ip6_other_query.delay_timer, + br_multicast_query_delay_expired, 0); timer_setup(&brmctx->ip6_own_query.timer, br_ip6_multicast_query_expired, 0); #endif @@ -4195,13 +4261,41 @@ void br_multicast_open(struct net_bridge *br) static void __br_multicast_stop(struct net_bridge_mcast *brmctx) { - del_timer_sync(&brmctx->ip4_mc_router_timer); - del_timer_sync(&brmctx->ip4_other_query.timer); - del_timer_sync(&brmctx->ip4_own_query.timer); + timer_delete_sync(&brmctx->ip4_mc_router_timer); + timer_delete_sync(&brmctx->ip4_other_query.timer); + timer_delete_sync(&brmctx->ip4_other_query.delay_timer); + timer_delete_sync(&brmctx->ip4_own_query.timer); #if IS_ENABLED(CONFIG_IPV6) - del_timer_sync(&brmctx->ip6_mc_router_timer); - del_timer_sync(&brmctx->ip6_other_query.timer); - del_timer_sync(&brmctx->ip6_own_query.timer); + timer_delete_sync(&brmctx->ip6_mc_router_timer); + timer_delete_sync(&brmctx->ip6_other_query.timer); + timer_delete_sync(&brmctx->ip6_other_query.delay_timer); + timer_delete_sync(&brmctx->ip6_own_query.timer); +#endif +} + +void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state) +{ +#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) + struct net_bridge *br; + + if (!br_vlan_should_use(v)) + return; + + if (br_vlan_is_master(v)) + return; + + br = v->port->br; + + if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) + return; + + if (br_vlan_state_allowed(state, true)) + 
br_multicast_enable_port_ctx(&v->port_mcast_ctx); + + /* Multicast is not disabled for the vlan when it goes in + * blocking state because the timers will expire and stop by + * themselves without sending more queries. + */ #endif } @@ -4298,9 +4392,9 @@ int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on, __br_multicast_open(&br->multicast_ctx); list_for_each_entry(p, &br->port_list, list) { if (on) - br_multicast_disable_port(p); + br_multicast_disable_port_ctx(&p->multicast_ctx); else - br_multicast_enable_port(p); + br_multicast_enable_port_ctx(&p->multicast_ctx); } list_for_each_entry(vlan, &vg->vlan_list, vlist) @@ -4378,9 +4472,9 @@ int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val) case MDB_RTR_TYPE_DISABLED: case MDB_RTR_TYPE_PERM: br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM); - del_timer(&brmctx->ip4_mc_router_timer); + timer_delete(&brmctx->ip4_mc_router_timer); #if IS_ENABLED(CONFIG_IPV6) - del_timer(&brmctx->ip6_mc_router_timer); + timer_delete(&brmctx->ip6_mc_router_timer); #endif brmctx->multicast_router = val; err = 0; @@ -4449,10 +4543,10 @@ int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx, case MDB_RTR_TYPE_DISABLED: pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; del |= br_ip4_multicast_rport_del(pmctx); - del_timer(&pmctx->ip4_mc_router_timer); + timer_delete(&pmctx->ip4_mc_router_timer); del |= br_ip6_multicast_rport_del(pmctx); #if IS_ENABLED(CONFIG_IPV6) - del_timer(&pmctx->ip6_mc_router_timer); + timer_delete(&pmctx->ip6_mc_router_timer); #endif br_multicast_rport_del_notify(pmctx, del); break; @@ -4464,10 +4558,10 @@ int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx, break; case MDB_RTR_TYPE_PERM: pmctx->multicast_router = MDB_RTR_TYPE_PERM; - del_timer(&pmctx->ip4_mc_router_timer); + timer_delete(&pmctx->ip4_mc_router_timer); br_ip4_multicast_add_router(brmctx, pmctx); #if IS_ENABLED(CONFIG_IPV6) - del_timer(&pmctx->ip6_mc_router_timer); + timer_delete(&pmctx->ip6_mc_router_timer); #endif br_ip6_multicast_add_router(brmctx, pmctx); break; @@ -4643,13 +4737,15 @@ int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val) max_delay = brmctx->multicast_query_response_interval; if (!timer_pending(&brmctx->ip4_other_query.timer)) - brmctx->ip4_other_query.delay_time = jiffies + max_delay; + mod_timer(&brmctx->ip4_other_query.delay_timer, + jiffies + max_delay); br_multicast_start_querier(brmctx, &brmctx->ip4_own_query); #if IS_ENABLED(CONFIG_IPV6) if (!timer_pending(&brmctx->ip6_other_query.timer)) - brmctx->ip6_other_query.delay_time = jiffies + max_delay; + mod_timer(&brmctx->ip6_other_query.delay_timer, + jiffies + max_delay); br_multicast_start_querier(brmctx, &brmctx->ip6_own_query); #endif @@ -5043,7 +5139,7 @@ void br_multicast_uninit_stats(struct net_bridge *br) free_percpu(br->mcast_stats); } -/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ +/* noinline for https://llvm.org/pr45802#c9 */ static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) { dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; diff --git a/net/bridge/br_multicast_eht.c b/net/bridge/br_multicast_eht.c index c126aa4e7551..adfd74102019 100644 --- a/net/bridge/br_multicast_eht.c +++ b/net/bridge/br_multicast_eht.c @@ -207,7 +207,9 @@ void br_multicast_eht_clean_sets(struct net_bridge_port_group *pg) static void br_multicast_eht_set_entry_expired(struct timer_list *t) { - struct net_bridge_group_eht_set_entry *set_h = from_timer(set_h, t, 
timer); + struct net_bridge_group_eht_set_entry *set_h = timer_container_of(set_h, + t, + timer); struct net_bridge *br = set_h->br; spin_lock(&br->multicast_lock); @@ -223,8 +225,9 @@ out: static void br_multicast_eht_set_expired(struct timer_list *t) { - struct net_bridge_group_eht_set *eht_set = from_timer(eht_set, t, - timer); + struct net_bridge_group_eht_set *eht_set = timer_container_of(eht_set, + t, + timer); struct net_bridge *br = eht_set->br; spin_lock(&br->multicast_lock); diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index ed1720890757..94cbe967d1c1 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -33,9 +33,11 @@ #include <net/ip.h> #include <net/ipv6.h> #include <net/addrconf.h> +#include <net/dst_metadata.h> #include <net/route.h> #include <net/netfilter/br_netfilter.h> #include <net/netns/generic.h> +#include <net/inet_dscp.h> #include <linux/uaccess.h> #include "br_private.h" @@ -43,6 +45,10 @@ #include <linux/sysctl.h> #endif +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include <net/netfilter/nf_conntrack_core.h> +#endif + static unsigned int brnf_net_id __read_mostly; struct brnf_net { @@ -133,6 +139,7 @@ static inline bool is_pppoe_ipv6(const struct sk_buff *skb, #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN) struct brnf_frag_data { + local_lock_t bh_lock; char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH]; u8 encap_size; u8 size; @@ -140,7 +147,9 @@ struct brnf_frag_data { __be16 vlan_proto; }; -static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage); +static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; static void nf_bridge_info_free(struct sk_buff *skb) { @@ -361,11 +370,11 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb, */ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb->dev, *br_indev; - struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + struct net_device *dev = skb->dev, *br_indev; + const struct iphdr *iph = ip_hdr(skb); + enum skb_drop_reason reason; struct rtable *rt; - int err; br_indev = nf_bridge_get_physindev(skb, net); if (!br_indev) { @@ -381,37 +390,13 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ } nf_bridge->in_prerouting = 0; if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) { - if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { - struct in_device *in_dev = __in_dev_get_rcu(dev); - - /* If err equals -EHOSTUNREACH the error is due to a - * martian destination or due to the fact that - * forwarding is disabled. For most martian packets, - * ip_route_output_key() will fail. It won't fail for 2 types of - * martian destinations: loopback destinations and destination - * 0.0.0.0. In both cases the packet will be dropped because the - * destination is the loopback device and not the bridge. */ - if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) - goto free_skb; - - rt = ip_route_output(net, iph->daddr, 0, - RT_TOS(iph->tos), 0); - if (!IS_ERR(rt)) { - /* - Bridged-and-DNAT'ed traffic doesn't - * require ip_forwarding. 
*/ - if (rt->dst.dev == dev) { - skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - goto bridged_dnat; - } - ip_rt_put(rt); - } -free_skb: - kfree_skb(skb); + reason = ip_route_input(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + if (reason) { + kfree_skb_reason(skb, reason); return 0; } else { if (skb_dst(skb)->dev == dev) { -bridged_dnat: skb->dev = br_indev; nf_bridge_update_protocol(skb); nf_bridge_push_encap_header(skb); @@ -553,6 +538,100 @@ static unsigned int br_nf_pre_routing(void *priv, return NF_STOLEN; } +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +/* conntracks' nf_confirm logic cannot handle cloned skbs referencing + * the same nf_conn entry, which will happen for multicast (broadcast) + * Frames on bridges. + * + * Example: + * macvlan0 + * br0 + * ethX ethY + * + * ethX (or Y) receives multicast or broadcast packet containing + * an IP packet, not yet in conntrack table. + * + * 1. skb passes through bridge and fake-ip (br_netfilter)Prerouting. + * -> skb->_nfct now references a unconfirmed entry + * 2. skb is broad/mcast packet. bridge now passes clones out on each bridge + * interface. + * 3. skb gets passed up the stack. + * 4. In macvlan case, macvlan driver retains clone(s) of the mcast skb + * and schedules a work queue to send them out on the lower devices. + * + * The clone skb->_nfct is not a copy, it is the same entry as the + * original skb. The macvlan rx handler then returns RX_HANDLER_PASS. + * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb. + * + * The Macvlan broadcast worker and normal confirm path will race. + * + * This race will not happen if step 2 already confirmed a clone. In that + * case later steps perform skb_clone() with skb->_nfct already confirmed (in + * hash table). This works fine. + * + * But such confirmation won't happen when eb/ip/nftables rules dropped the + * packets before they reached the nf_confirm step in postrouting. + * + * Work around this problem by explicit confirmation of the entry at + * LOCAL_IN time, before upper layer has a chance to clone the unconfirmed + * entry. + * + */ +static unsigned int br_nf_local_in(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + bool promisc = BR_INPUT_SKB_CB(skb)->promisc; + struct nf_conntrack *nfct = skb_nfct(skb); + const struct nf_ct_hook *ct_hook; + struct nf_conn *ct; + int ret; + + if (promisc) { + nf_reset_ct(skb); + return NF_ACCEPT; + } + + if (!nfct || skb->pkt_type == PACKET_HOST) + return NF_ACCEPT; + + ct = container_of(nfct, struct nf_conn, ct_general); + if (likely(nf_ct_is_confirmed(ct))) + return NF_ACCEPT; + + if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) { + nf_reset_ct(skb); + return NF_ACCEPT; + } + + WARN_ON_ONCE(skb_shared(skb)); + + /* We can't call nf_confirm here, it would create a dependency + * on nf_conntrack module. 
+ */ + ct_hook = rcu_dereference(nf_ct_hook); + if (!ct_hook) { + skb->_nfct = 0ul; + nf_conntrack_put(nfct); + return NF_ACCEPT; + } + + nf_bridge_pull_encap_header(skb); + ret = ct_hook->confirm(skb); + switch (ret & NF_VERDICT_MASK) { + case NF_STOLEN: + return NF_STOLEN; + default: + nf_bridge_push_encap_header(skb); + break; + } + + ct = container_of(nfct, struct nf_conn, ct_general); + WARN_ON_ONCE(!nf_ct_is_confirmed(ct)); + + return ret; +} +#endif /* PF_BRIDGE/FORWARD *************************************************/ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) @@ -755,6 +834,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff { struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); unsigned int mtu, mtu_reserved; + int ret; mtu_reserved = nf_bridge_mtu_reduction(skb); mtu = skb->dev->mtu; @@ -775,6 +855,10 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff return br_dev_queue_push_xmit(net, sk, skb); } + /* Fragmentation on metadata/template dst is not supported */ + if (unlikely(!skb_valid_dst(skb))) + goto drop; + /* This is wrong! We should preserve the original fragment * boundaries by preserving frag_list rather than refragmenting. */ @@ -787,6 +871,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; + local_lock_nested_bh(&brnf_frag_data_storage.bh_lock); data = this_cpu_ptr(&brnf_frag_data_storage); if (skb_vlan_tag_present(skb)) { @@ -802,7 +887,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff skb_copy_from_linear_data_offset(skb, -data->size, data->mac, data->size); - return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit); + ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit); + local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock); + return ret; } if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { @@ -814,6 +901,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; + local_lock_nested_bh(&brnf_frag_data_storage.bh_lock); data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; @@ -821,8 +909,12 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff skb_copy_from_linear_data_offset(skb, -data->size, data->mac, data->size); - if (v6ops) - return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit); + if (v6ops) { + ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit); + local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock); + return ret; + } + local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock); kfree_skb(skb); return -EMSGSIZE; @@ -964,6 +1056,14 @@ static const struct nf_hook_ops br_nf_ops[] = { .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_BRNF, }, +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + { + .hook = br_nf_local_in, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_LOCAL_IN, + .priority = NF_BR_PRI_LAST, + }, +#endif { .hook = br_nf_forward, .pf = NFPROTO_BRIDGE, @@ -1074,7 +1174,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, #ifdef CONFIG_SYSCTL static -int brnf_sysctl_call_tables(struct ctl_table *ctl, int write, +int brnf_sysctl_call_tables(const struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; @@ -1123,7 +1223,6 @@ static struct 
ctl_table brnf_table[] = { .mode = 0644, .proc_handler = brnf_sysctl_call_tables, }, - { } }; static inline void br_netfilter_sysctl_default(struct brnf_net *brnf) @@ -1172,7 +1271,7 @@ static int br_netfilter_sysctl_init_net(struct net *net) static void br_netfilter_sysctl_exit_net(struct net *net, struct brnf_net *brnet) { - struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg; + const struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg; unregister_net_sysctl_table(brnet->ctl_hdr); if (!net_eq(net, &init_net)) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 5ad4abfcb7ba..6e337937d0d7 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, u32 filter_mask, const struct net_device *dev, bool getlink) { - u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; + u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) : + IF_OPER_DOWN; struct nlattr *af = NULL; struct net_bridge *br; struct ifinfomsg *hdr; @@ -666,7 +667,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br, { u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED; - return br_info_notify(event, br, port, filter); + br_info_notify(event, br, port, filter); } /* @@ -1552,11 +1553,13 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], return 0; } -static int br_dev_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int br_dev_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net_bridge *br = netdev_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; int err; err = register_netdevice(dev); @@ -1919,8 +1922,13 @@ int __init br_netlink_init(void) { int err; - br_vlan_rtnl_init(); - rtnl_af_register(&br_af_ops); + err = br_vlan_rtnl_init(); + if (err) + goto out; + + err = rtnl_af_register(&br_af_ops); + if (err) + goto out_vlan; err = rtnl_link_register(&br_link_ops); if (err) @@ -1930,6 +1938,9 @@ int __init br_netlink_init(void) out_af: rtnl_af_unregister(&br_af_ops); +out_vlan: + br_vlan_rtnl_uninit(); +out: return err; } diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index 17abf092f7ca..71a12da30004 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -315,8 +315,8 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br, if (curr_change) *changed = curr_change; - __vlan_tunnel_handle_range(p, &v_start, &v_end, v, - curr_change); + __vlan_tunnel_handle_range(p, &v_start, &v_end, v, + curr_change); } if (v_start && v_end) br_vlan_notify(br, p, v_start->vid, v_end->vid, diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 98aea5485aae..a8c67035e23c 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. 
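The br_netlink_init() hunk above replaces two unchecked registrations with checked ones and unwinds them in reverse on failure (the new out_vlan/out labels). The standalone sketch below shows that unwind ladder, with hypothetical register_a/b/c standing in for br_vlan_rtnl_init(), rtnl_af_register() and rtnl_link_register(); it is illustrative only.

#include <stdio.h>

/* Hypothetical subsystems standing in for the three registrations;
 * each returns 0 on success, a negative value on failure.
 */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }	/* force the unwind path */
static void unregister_b(void) { }
static void unregister_a(void) { }

static int init_all(void)
{
	int err;

	err = register_a();
	if (err)
		goto out;
	err = register_b();
	if (err)
		goto out_a;
	err = register_c();
	if (err)
		goto out_b;
	return 0;

out_b:				/* undo in reverse order of setup */
	unregister_b();
out_a:
	unregister_a();
out:
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}

Keeping the labels in the opposite order of the setup means each failure point falls through exactly the cleanups it needs and nothing more.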
*/ -static const u32 br_dst_default_metrics[RTAX_MAX] = { - [RTAX_MTU - 1] = 1500, -}; - void br_netfilter_rtable_init(struct net_bridge *br) { struct rtable *rt = &br->fake_rtable; rcuref_init(&rt->dst.__rcuref, 1); rt->dst.dev = br->dev; - dst_init_metrics(&rt->dst, br_dst_default_metrics, true); + dst_init_metrics(&rt->dst, br->metrics, false); + dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu); rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE; rt->dst.ops = &fake_dst_ops; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index b0a92c344722..b159aae594c0 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -78,7 +78,7 @@ struct bridge_mcast_own_query { /* other querier */ struct bridge_mcast_other_query { struct timer_list timer; - unsigned long delay_time; + struct timer_list delay_timer; }; /* selected querier */ @@ -306,11 +306,12 @@ struct net_bridge_fdb_flush_desc { u16 vlan_id; }; -#define MDB_PG_FLAGS_PERMANENT BIT(0) -#define MDB_PG_FLAGS_OFFLOAD BIT(1) -#define MDB_PG_FLAGS_FAST_LEAVE BIT(2) -#define MDB_PG_FLAGS_STAR_EXCL BIT(3) -#define MDB_PG_FLAGS_BLOCKED BIT(4) +#define MDB_PG_FLAGS_PERMANENT BIT(0) +#define MDB_PG_FLAGS_OFFLOAD BIT(1) +#define MDB_PG_FLAGS_FAST_LEAVE BIT(2) +#define MDB_PG_FLAGS_STAR_EXCL BIT(3) +#define MDB_PG_FLAGS_BLOCKED BIT(4) +#define MDB_PG_FLAGS_OFFLOAD_FAILED BIT(5) #define PG_SRC_ENT_LIMIT 32 @@ -483,6 +484,7 @@ enum net_bridge_opts { BROPT_VLAN_BRIDGE_BINDING, BROPT_MCAST_VLAN_SNOOPING_ENABLED, BROPT_MST_ENABLED, + BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION, }; struct net_bridge { @@ -505,6 +507,7 @@ struct net_bridge { struct rtable fake_rtable; struct rt6_info fake_rt6_info; }; + u32 metrics[RTAX_MAX]; #endif u16 group_fwd_mask; u16 group_fwd_mask_required; @@ -589,6 +592,7 @@ struct br_input_skb_cb { #endif u8 proxyarp_replied:1; u8 src_port_isolated:1; + u8 promisc:1; #ifdef CONFIG_BRIDGE_VLAN_FILTERING u8 vlan_filtered:1; #endif @@ -852,12 +856,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - struct netlink_ext_ack *extack); + bool *notified, struct netlink_ext_ack *extack); int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, struct netlink_ext_ack *extack); int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, - struct netlink_ext_ack *extack); + bool *notified, struct netlink_ext_ack *extack); int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, struct net_device *fdev, int *idx); int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev, @@ -948,8 +952,7 @@ br_port_get_check_rtnl(const struct net_device *dev) /* br_ioctl.c */ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd); -int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg); +int br_ioctl_stub(struct net *net, unsigned int cmd, void __user *uarg); /* br_multicast.c */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING @@ -1002,6 +1005,8 @@ int br_mdb_hash_init(struct net_bridge *br); void br_mdb_hash_fini(struct net_bridge *br); void br_mdb_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp, struct net_bridge_port_group *pg, int type); +void br_mdb_flag_change_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp, + struct net_bridge_port_group *pg); 
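The new MDB_PG_FLAGS_OFFLOAD_FAILED bit works together with the BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION toggle and br_mdb_flag_change_notify(): the helpers added further down in br_private.h clear both offload bits, raise exactly one of them, and notify only when the failed bit actually changed. A small standalone model of that logic follows; the bit positions mirror the header, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define PG_OFFLOAD		(1u << 1)	/* mirrors MDB_PG_FLAGS_OFFLOAD */
#define PG_OFFLOAD_FAILED	(1u << 5)	/* mirrors MDB_PG_FLAGS_OFFLOAD_FAILED */

/* Clear both offload bits and raise exactly one, like
 * br_multicast_set_pg_offload_flags().
 */
static uint32_t set_offload_flags(uint32_t flags, int offloaded)
{
	flags &= ~(PG_OFFLOAD | PG_OFFLOAD_FAILED);
	flags |= offloaded ? PG_OFFLOAD : PG_OFFLOAD_FAILED;
	return flags;
}

int main(void)
{
	uint32_t old_flags = PG_OFFLOAD;			/* was offloaded */
	uint32_t new_flags = set_offload_flags(old_flags, 0);	/* offload now failed */

	/* Notify only when the FAILED bit itself flipped, as
	 * br_mdb_should_notify() does when the bridge option is on.
	 */
	if ((old_flags ^ new_flags) & PG_OFFLOAD_FAILED)
		printf("notify: 0x%x -> 0x%x\n", old_flags, new_flags);
	return 0;
}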
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx, int type); void br_multicast_del_pg(struct net_bridge_mdb_entry *mp, @@ -1051,6 +1056,7 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port, struct net_bridge_vlan *vlan, struct net_bridge_mcast_port *pmctx); void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx); +void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state); void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on); int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on, struct netlink_ext_ack *extack); @@ -1159,7 +1165,7 @@ __br_multicast_querier_exists(struct net_bridge_mcast *brmctx, own_querier_enabled = false; } - return time_is_before_jiffies(querier->delay_time) && + return !timer_pending(&querier->delay_timer) && (own_querier_enabled || timer_pending(&querier->timer)); } @@ -1342,6 +1348,22 @@ br_multicast_ctx_matches_vlan_snooping(const struct net_bridge_mcast *brmctx) return !!(vlan_snooping_enabled == br_multicast_ctx_is_vlan(brmctx)); } + +static inline void +br_multicast_set_pg_offload_flags(struct net_bridge_port_group *p, + bool offloaded) +{ + p->flags &= ~(MDB_PG_FLAGS_OFFLOAD | MDB_PG_FLAGS_OFFLOAD_FAILED); + p->flags |= (offloaded ? MDB_PG_FLAGS_OFFLOAD : + MDB_PG_FLAGS_OFFLOAD_FAILED); +} + +static inline bool +br_mdb_should_notify(const struct net_bridge *br, u8 changed_flags) +{ + return br_opt_get(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION) && + (changed_flags & MDB_PG_FLAGS_OFFLOAD_FAILED); +} #else static inline int br_multicast_rcv(struct net_bridge_mcast **brmctx, struct net_bridge_mcast_port **pmctx, @@ -1501,6 +1523,11 @@ static inline void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pm { } +static inline void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, + u8 state) +{ +} + static inline void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on) { @@ -1570,7 +1597,10 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v, void br_vlan_port_event(struct net_bridge_port *p, unsigned long event); int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr); -void br_vlan_rtnl_init(void); +void br_vlan_vlan_upper_event(struct net_device *br_dev, + struct net_device *vlan_dev, + unsigned long event); +int br_vlan_rtnl_init(void); void br_vlan_rtnl_uninit(void); void br_vlan_notify(const struct net_bridge *br, const struct net_bridge_port *p, @@ -1801,8 +1831,15 @@ static inline int br_vlan_bridge_event(struct net_device *dev, return 0; } -static inline void br_vlan_rtnl_init(void) +static inline void br_vlan_vlan_upper_event(struct net_device *br_dev, + struct net_device *vlan_dev, + unsigned long event) +{ +} + +static inline int br_vlan_rtnl_init(void) { + return 0; } static inline void br_vlan_rtnl_uninit(void) @@ -1851,7 +1888,9 @@ bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr, bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range, const struct net_bridge_vlan *v_opts); -/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */ +/* vlan state manipulation helpers using *_ONCE to annotate lock-free access, + * while br_vlan_set_state() may access data protected by multicast_lock. 
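The reworded comment above is the convention relied on just below: the VLAN state field is written with WRITE_ONCE() and read locklessly with READ_ONCE(), and br_vlan_set_state() may additionally touch data guarded by multicast_lock. A minimal sketch of that annotation pattern, using a hypothetical structure whose writers happen to be serialized by a spinlock (assumes the usual <linux/spinlock.h>/<linux/lockdep.h> primitives):

struct vstate {
	spinlock_t lock;
	u8 state;
};

static void vstate_set(struct vstate *v, u8 state)
{
	lockdep_assert_held(&v->lock);		/* writers are serialized */
	WRITE_ONCE(v->state, state);		/* pairs with READ_ONCE() below */
}

static u8 vstate_get(const struct vstate *v)
{
	return READ_ONCE(v->state);		/* may run with no lock held */
}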
+ */ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v) { return READ_ONCE(v->state); @@ -1860,6 +1899,7 @@ static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v) static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state) { WRITE_ONCE(v->state, state); + br_multicast_update_vlan_mcast_ctx(v, state); } static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg) @@ -2288,6 +2328,6 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p); void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p, struct nd_msg *msg); -struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m); +struct nd_msg *br_is_nd_neigh_msg(const struct sk_buff *skb, struct nd_msg *m); bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid); #endif diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 7d27b2e6038f..024210f95468 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -198,7 +198,7 @@ void br_become_root_bridge(struct net_bridge *br) br->hello_time = br->bridge_hello_time; br->forward_delay = br->bridge_forward_delay; br_topology_change_detection(br); - del_timer(&br->tcn_timer); + timer_delete(&br->tcn_timer); if (br->dev->flags & IFF_UP) { br_config_bpdu_generation(br); @@ -363,7 +363,7 @@ static int br_supersedes_port_info(const struct net_bridge_port *p, static void br_topology_change_acknowledged(struct net_bridge *br) { br->topology_change_detected = 0; - del_timer(&br->tcn_timer); + timer_delete(&br->tcn_timer); } /* called under bridge lock */ @@ -439,7 +439,7 @@ static void br_make_blocking(struct net_bridge_port *p) br_set_state(p, BR_STATE_BLOCKING); br_ifinfo_notify(RTM_NEWLINK, NULL, p); - del_timer(&p->forward_delay_timer); + timer_delete(&p->forward_delay_timer); } } @@ -454,7 +454,7 @@ static void br_make_forwarding(struct net_bridge_port *p) if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) { br_set_state(p, BR_STATE_FORWARDING); br_topology_change_detection(br); - del_timer(&p->forward_delay_timer); + timer_delete(&p->forward_delay_timer); } else if (br->stp_enabled == BR_KERNEL_STP) br_set_state(p, BR_STATE_LISTENING); else @@ -483,7 +483,7 @@ void br_port_state_selection(struct net_bridge *br) p->topology_change_ack = 0; br_make_forwarding(p); } else if (br_is_designated_port(p)) { - del_timer(&p->message_age_timer); + timer_delete(&p->message_age_timer); br_make_forwarding(p); } else { p->config_pending = 0; @@ -533,9 +533,9 @@ void br_received_config_bpdu(struct net_bridge_port *p, br_port_state_selection(br); if (!br_is_root_bridge(br) && was_root) { - del_timer(&br->hello_timer); + timer_delete(&br->hello_timer); if (br->topology_change_detected) { - del_timer(&br->topology_change_timer); + timer_delete(&br->topology_change_timer); br_transmit_tcn(br); mod_timer(&br->tcn_timer, diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 0e4572f31330..7895489ac6fe 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -17,7 +17,7 @@ #include <net/llc.h> #include <net/llc_pdu.h> #include <net/stp.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "br_private.h" #include "br_private_stp.h" diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 75204d36d7f9..c20a41bf253b 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -81,9 +81,9 @@ void br_stp_disable_bridge(struct net_bridge *br) 
br->topology_change_detected = 0; spin_unlock_bh(&br->lock); - del_timer_sync(&br->hello_timer); - del_timer_sync(&br->topology_change_timer); - del_timer_sync(&br->tcn_timer); + timer_delete_sync(&br->hello_timer); + timer_delete_sync(&br->topology_change_timer); + timer_delete_sync(&br->tcn_timer); cancel_delayed_work_sync(&br->gc_work); } @@ -109,9 +109,9 @@ void br_stp_disable_port(struct net_bridge_port *p) br_ifinfo_notify(RTM_NEWLINK, NULL, p); - del_timer(&p->message_age_timer); - del_timer(&p->forward_delay_timer); - del_timer(&p->hold_timer); + timer_delete(&p->message_age_timer); + timer_delete(&p->forward_delay_timer); + timer_delete(&p->hold_timer); if (!rcu_access_pointer(p->backup_port)) br_fdb_delete_by_port(br, p, 0, 0); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 27bf1979b909..e5d453305381 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -29,7 +29,7 @@ static int br_is_designated_for_some_port(const struct net_bridge *br) static void br_hello_timer_expired(struct timer_list *t) { - struct net_bridge *br = from_timer(br, t, hello_timer); + struct net_bridge *br = timer_container_of(br, t, hello_timer); br_debug(br, "hello timer expired\n"); spin_lock(&br->lock); @@ -45,7 +45,8 @@ static void br_hello_timer_expired(struct timer_list *t) static void br_message_age_timer_expired(struct timer_list *t) { - struct net_bridge_port *p = from_timer(p, t, message_age_timer); + struct net_bridge_port *p = timer_container_of(p, t, + message_age_timer); struct net_bridge *br = p->br; const bridge_id *id = &p->designated_bridge; int was_root; @@ -78,7 +79,8 @@ static void br_message_age_timer_expired(struct timer_list *t) static void br_forward_delay_timer_expired(struct timer_list *t) { - struct net_bridge_port *p = from_timer(p, t, forward_delay_timer); + struct net_bridge_port *p = timer_container_of(p, t, + forward_delay_timer); struct net_bridge *br = p->br; br_debug(br, "port %u(%s) forward delay timer\n", @@ -102,7 +104,7 @@ static void br_forward_delay_timer_expired(struct timer_list *t) static void br_tcn_timer_expired(struct timer_list *t) { - struct net_bridge *br = from_timer(br, t, tcn_timer); + struct net_bridge *br = timer_container_of(br, t, tcn_timer); br_debug(br, "tcn timer expired\n"); spin_lock(&br->lock); @@ -116,7 +118,8 @@ static void br_tcn_timer_expired(struct timer_list *t) static void br_topology_change_timer_expired(struct timer_list *t) { - struct net_bridge *br = from_timer(br, t, topology_change_timer); + struct net_bridge *br = timer_container_of(br, t, + topology_change_timer); br_debug(br, "topo change timer expired\n"); spin_lock(&br->lock); @@ -127,7 +130,7 @@ static void br_topology_change_timer_expired(struct timer_list *t) static void br_hold_timer_expired(struct timer_list *t) { - struct net_bridge_port *p = from_timer(p, t, hold_timer); + struct net_bridge_port *p = timer_container_of(p, t, hold_timer); br_debug(p->br, "port %u(%s) hold timer expired\n", (unsigned int) p->port_no, p->dev->name); diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c index ee84e783e1df..95d7355a0407 100644 --- a/net/bridge/br_switchdev.c +++ b/net/bridge/br_switchdev.c @@ -504,9 +504,10 @@ static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *pri struct net_bridge_mdb_entry *mp; struct net_bridge_port *port = data->port; struct net_bridge *br = port->br; + u8 old_flags; - if (err) - goto err; + if (err == -EOPNOTSUPP) + goto out_free; spin_lock_bh(&br->multicast_lock); mp = 
br_mdb_ip_get(br, &data->ip); @@ -516,11 +517,15 @@ static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *pri pp = &p->next) { if (p->key.port != port) continue; - p->flags |= MDB_PG_FLAGS_OFFLOAD; + + old_flags = p->flags; + br_multicast_set_pg_offload_flags(p, !err); + if (br_mdb_should_notify(br, old_flags ^ p->flags)) + br_mdb_flag_change_notify(br->dev, mp, p); } out: spin_unlock_bh(&br->multicast_lock); -err: +out_free: kfree(priv); } @@ -595,21 +600,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev, } static int br_switchdev_mdb_queue_one(struct list_head *mdb_list, + struct net_device *dev, + unsigned long action, enum switchdev_obj_id id, const struct net_bridge_mdb_entry *mp, struct net_device *orig_dev) { - struct switchdev_obj_port_mdb *mdb; + struct switchdev_obj_port_mdb mdb = { + .obj = { + .id = id, + .orig_dev = orig_dev, + }, + }; + struct switchdev_obj_port_mdb *pmdb; - mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC); - if (!mdb) - return -ENOMEM; + br_switchdev_mdb_populate(&mdb, mp); - mdb->obj.id = id; - mdb->obj.orig_dev = orig_dev; - br_switchdev_mdb_populate(mdb, mp); - list_add_tail(&mdb->obj.list, mdb_list); + if (action == SWITCHDEV_PORT_OBJ_ADD && + switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) { + /* This event is already in the deferred queue of + * events, so this replay must be elided, lest the + * driver receives duplicate events for it. This can + * only happen when replaying additions, since + * modifications are always immediately visible in + * br->mdb_list, whereas actual event delivery may be + * delayed. + */ + return 0; + } + pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC); + if (!pmdb) + return -ENOMEM; + + list_add_tail(&pmdb->obj.list, mdb_list); return 0; } @@ -677,51 +701,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev, if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) return 0; - /* We cannot walk over br->mdb_list protected just by the rtnl_mutex, - * because the write-side protection is br->multicast_lock. But we - * need to emulate the [ blocking ] calling context of a regular - * switchdev event, so since both br->multicast_lock and RCU read side - * critical sections are atomic, we have no choice but to pick the RCU - * read side lock, queue up all our events, leave the critical section - * and notify switchdev from blocking context. + if (adding) + action = SWITCHDEV_PORT_OBJ_ADD; + else + action = SWITCHDEV_PORT_OBJ_DEL; + + /* br_switchdev_mdb_queue_one() will take care to not queue a + * replay of an event that is already pending in the switchdev + * deferred queue. In order to safely determine that, there + * must be no new deferred MDB notifications enqueued for the + * duration of the MDB scan. Therefore, grab the write-side + * lock to avoid racing with any concurrent IGMP/MLD snooping. 
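The comment above describes the elision: while the MDB list is scanned under multicast_lock, an ADD is not queued for replay if an equivalent object is still sitting in the switchdev deferred queue, since the driver will see that event anyway. A deliberately tiny userspace model of that check follows; nothing below is a kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Toy model: "deferred" holds events already queued for delivery,
 * "replay" is what we are about to send while syncing a new port.
 */
struct obj { int id; };

static struct obj deferred[8];
static int n_deferred;

static bool is_deferred(const struct obj *o)
{
	for (int i = 0; i < n_deferred; i++)
		if (deferred[i].id == o->id)
			return true;
	return false;
}

static int queue_replay(struct obj *replay, int n_replay, const struct obj *o)
{
	if (is_deferred(o))
		return n_replay;	/* already pending: elide the duplicate */
	replay[n_replay] = *o;
	return n_replay + 1;
}

int main(void)
{
	struct obj replay[8];
	int n = 0;

	deferred[n_deferred++] = (struct obj){ .id = 42 };

	n = queue_replay(replay, n, &(struct obj){ .id = 42 });	/* skipped */
	n = queue_replay(replay, n, &(struct obj){ .id = 7 });	/* queued  */

	printf("queued %d replay event(s)\n", n);
	return 0;
}

In the real code the membership test is switchdev_port_obj_act_is_deferred(), and holding multicast_lock keeps new MDB notifications from being deferred mid-scan.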
*/ - rcu_read_lock(); + spin_lock_bh(&br->multicast_lock); - hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) { + hlist_for_each_entry(mp, &br->mdb_list, mdb_node) { struct net_bridge_port_group __rcu * const *pp; const struct net_bridge_port_group *p; if (mp->host_joined) { - err = br_switchdev_mdb_queue_one(&mdb_list, + err = br_switchdev_mdb_queue_one(&mdb_list, dev, action, SWITCHDEV_OBJ_ID_HOST_MDB, mp, br_dev); if (err) { - rcu_read_unlock(); + spin_unlock_bh(&br->multicast_lock); goto out_free_mdb; } } - for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL; + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (p->key.port->dev != dev) continue; - err = br_switchdev_mdb_queue_one(&mdb_list, + err = br_switchdev_mdb_queue_one(&mdb_list, dev, action, SWITCHDEV_OBJ_ID_PORT_MDB, mp, dev); if (err) { - rcu_read_unlock(); + spin_unlock_bh(&br->multicast_lock); goto out_free_mdb; } } } - rcu_read_unlock(); - - if (adding) - action = SWITCHDEV_PORT_OBJ_ADD; - else - action = SWITCHDEV_PORT_OBJ_DEL; + spin_unlock_bh(&br->multicast_lock); list_for_each_entry(obj, &mdb_list, list) { err = br_switchdev_mdb_replay_one(nb, dev, @@ -786,6 +809,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p, br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL); br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL); + + /* Make sure that the device leaving this bridge has seen all + * relevant events before it is disassociated. In the normal + * case, when the device is directly attached to the bridge, + * this is covered by del_nbp(). If the association was indirect + * however, e.g. via a team or bond, and the device is leaving + * that intermediate device, then the bridge port remains in + * place. + */ + switchdev_deferred_process(); } /* Let the bridge know that this port is offloaded, so that it can assign a diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index ea733542244c..c1176a5e02c4 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c @@ -1002,7 +1002,7 @@ static const struct attribute_group bridge_group = { * Returns the number of bytes read. 
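The sysfs hunk that follows switches the forwarding-table dump to a const struct bin_attribute wired up through .read_new, matching the callback signature shown in the diff. A minimal sketch of that shape with a hypothetical attribute (assumes <linux/sysfs.h> and <linux/string.h>; registration via sysfs_create_bin_file() is omitted):

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    const struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	static const char blob[] = "example";

	if (off >= sizeof(blob))
		return 0;
	if (count > sizeof(blob) - off)
		count = sizeof(blob) - off;
	memcpy(buf, blob + off, count);
	return count;
}

static const struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = 0444 },
	.read_new = example_read,
};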
*/ static ssize_t brforward_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, + const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); @@ -1023,10 +1023,10 @@ static ssize_t brforward_read(struct file *filp, struct kobject *kobj, return n; } -static struct bin_attribute bridge_forward = { +static const struct bin_attribute bridge_forward = { .attr = { .name = SYSFS_BRIDGE_FDB, .mode = 0444, }, - .read = brforward_read, + .read_new = brforward_read, }; /* diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 15f44d026e75..939a3aa78d5c 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -715,8 +715,8 @@ static int br_vlan_add_existing(struct net_bridge *br, u16 flags, bool *changed, struct netlink_ext_ack *extack) { - bool would_change = __vlan_flags_would_change(vlan, flags); bool becomes_brentry = false; + bool would_change = false; int err; if (!br_vlan_is_brentry(vlan)) { @@ -725,6 +725,8 @@ static int br_vlan_add_existing(struct net_bridge *br, return -EINVAL; becomes_brentry = true; + } else { + would_change = __vlan_flags_would_change(vlan, flags); } /* Master VLANs that aren't brentries weren't notified before, @@ -841,7 +843,7 @@ void br_vlan_flush(struct net_bridge *br) vg = br_vlan_group(br); __vlan_flush(br, NULL, vg); RCU_INIT_POINTER(br->vlgrp, NULL); - synchronize_rcu(); + synchronize_net(); __vlan_group_free(vg); } @@ -1372,7 +1374,7 @@ void nbp_vlan_flush(struct net_bridge_port *port) vg = nbp_vlan_group(port); __vlan_flush(port->br, port, vg); RCU_INIT_POINTER(port->vlgrp, NULL); - synchronize_rcu(); + synchronize_net(); __vlan_group_free(vg); } @@ -1664,6 +1666,18 @@ static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p) } } +static void br_vlan_toggle_bridge_binding(struct net_device *br_dev, + bool enable) +{ + struct net_bridge *br = netdev_priv(br_dev); + + if (enable) + br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true); + else + br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, + br_vlan_has_upper_bind_vlan_dev(br_dev)); +} + static void br_vlan_upper_change(struct net_device *dev, struct net_device *upper_dev, bool linking) @@ -1673,13 +1687,9 @@ static void br_vlan_upper_change(struct net_device *dev, if (!br_vlan_is_bind_vlan_dev(upper_dev)) return; - if (linking) { + br_vlan_toggle_bridge_binding(dev, linking); + if (linking) br_vlan_set_vlan_dev_state(br, upper_dev); - br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true); - } else { - br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, - br_vlan_has_upper_bind_vlan_dev(dev)); - } } struct br_vlan_link_state_walk_data { @@ -1764,6 +1774,30 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr) return ret; } +void br_vlan_vlan_upper_event(struct net_device *br_dev, + struct net_device *vlan_dev, + unsigned long event) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(vlan_dev); + struct net_bridge *br = netdev_priv(br_dev); + bool bridge_binding; + + switch (event) { + case NETDEV_CHANGE: + case NETDEV_UP: + break; + default: + return; + } + + bridge_binding = vlan->flags & VLAN_FLAG_BRIDGE_BINDING; + br_vlan_toggle_bridge_binding(br_dev, bridge_binding); + if (bridge_binding) + br_vlan_set_vlan_dev_state(br, vlan_dev); + else if (!bridge_binding && netif_carrier_ok(br_dev)) + netif_carrier_on(vlan_dev); +} + /* Must be protected by RTNL. 
*/ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event) { @@ -2296,19 +2330,18 @@ static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } -void br_vlan_rtnl_init(void) +static const struct rtnl_msg_handler br_vlan_rtnl_msg_handlers[] = { + {THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN, br_vlan_rtm_process, NULL, 0}, + {THIS_MODULE, PF_BRIDGE, RTM_DELVLAN, br_vlan_rtm_process, NULL, 0}, + {THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL, br_vlan_rtm_dump, 0}, +}; + +int br_vlan_rtnl_init(void) { - rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL, - br_vlan_rtm_dump, 0); - rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN, - br_vlan_rtm_process, NULL, 0); - rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN, - br_vlan_rtm_process, NULL, 0); + return rtnl_register_many(br_vlan_rtnl_msg_handlers); } void br_vlan_rtnl_uninit(void) { - rtnl_unregister(PF_BRIDGE, RTM_GETVLAN); - rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN); - rtnl_unregister(PF_BRIDGE, RTM_DELVLAN); + rtnl_unregister_many(br_vlan_rtnl_msg_handlers); } diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c index 81833ca7a2c7..a966a6ec8263 100644 --- a/net/bridge/br_vlan_tunnel.c +++ b/net/bridge/br_vlan_tunnel.c @@ -65,13 +65,14 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg, { struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst); __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id)); + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; int err; if (metadata) return -EEXIST; - metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY, - key, 0); + __set_bit(IP_TUNNEL_KEY_BIT, flags); + metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, key, 0); if (!metadata) return -EINVAL; @@ -185,6 +186,7 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb, int br_handle_egress_vlan_tunnel(struct sk_buff *skb, struct net_bridge_vlan *vlan) { + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; struct metadata_dst *tunnel_dst; __be64 tunnel_id; int err; @@ -202,7 +204,8 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb, return err; if (BR_INPUT_SKB_CB(skb)->backup_nhid) { - tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY, + __set_bit(IP_TUNNEL_KEY_BIT, flags); + tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, tunnel_id, 0); if (!tunnel_dst) return -ENOMEM; diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index 7f304a19ac1b..f16bbbbb9481 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -39,6 +39,16 @@ config NF_CONNTRACK_BRIDGE To compile it as a module, choose M here. If unsure, say N. +# old sockopt interface and eval loop +config BRIDGE_NF_EBTABLES_LEGACY + tristate "Legacy EBTABLES support" + depends on BRIDGE && NETFILTER_XTABLES + default n + help + Legacy ebtables packet/frame classifier. + This is not needed if you are using ebtables over nftables + (iptables-nft). 
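Stepping back to the br_vlan_tunnel.c hunks above: tunnel flags are no longer passed as the old 16-bit TUNNEL_KEY mask but as a bitmap declared with IP_TUNNEL_DECLARE_FLAGS() and populated bit by bit, so the flag space is no longer capped at 16 bits. A generic sketch of that bitmap style (hypothetical flag names; assumes <linux/bitmap.h>), showing the same __set_bit()/test_bit() usage:

enum { EX_KEY_BIT, EX_CSUM_BIT, __EX_NUM_BITS };

static bool example_wants_key(void)
{
	DECLARE_BITMAP(flags, __EX_NUM_BITS) = { };

	__set_bit(EX_KEY_BIT, flags);		/* like IP_TUNNEL_KEY_BIT above */
	return test_bit(EX_KEY_BIT, flags);
}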
+ menuconfig BRIDGE_NF_EBTABLES tristate "Ethernet Bridge tables (ebtables) support" depends on BRIDGE && NETFILTER && NETFILTER_XTABLES @@ -55,6 +65,7 @@ if BRIDGE_NF_EBTABLES # config BRIDGE_EBT_BROUTE tristate "ebt: broute table support" + select BRIDGE_NF_EBTABLES_LEGACY help The ebtables broute table is used to define rules that decide between bridging and routing frames, giving Linux the functionality of a @@ -65,6 +76,7 @@ config BRIDGE_EBT_BROUTE config BRIDGE_EBT_T_FILTER tristate "ebt: filter table support" + select BRIDGE_NF_EBTABLES_LEGACY help The ebtables filter table is used to define frame filtering rules at local input, forwarding and local output. See the man page for @@ -74,6 +86,7 @@ config BRIDGE_EBT_T_FILTER config BRIDGE_EBT_T_NAT tristate "ebt: nat table support" + select BRIDGE_NF_EBTABLES_LEGACY help The ebtables nat table is used to define rules that alter the MAC source address (MAC SNAT) or the MAC destination address (MAC DNAT). diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile index 1c9ce49ab651..b9a1303da977 100644 --- a/net/bridge/netfilter/Makefile +++ b/net/bridge/netfilter/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o # connection tracking obj-$(CONFIG_NF_CONNTRACK_BRIDGE) += nf_conntrack_bridge.o -obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o +obj-$(CONFIG_BRIDGE_NF_EBTABLES_LEGACY) += ebtables.o # tables obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 99d82676f780..3e67d4aff419 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1111,6 +1111,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len) struct ebt_table_info *newinfo; struct ebt_replace tmp; + if (len < sizeof(tmp)) + return -EINVAL; if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; @@ -1254,7 +1256,7 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table, goto free_unlock; } - ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL); + ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL); if (!ops) { ret = -ENOMEM; if (newinfo->nentries) @@ -1423,6 +1425,8 @@ static int update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct ebt_replace hlp; + if (len < sizeof(hlp)) + return -EINVAL; if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; @@ -2352,6 +2356,8 @@ static int compat_update_counters(struct net *net, sockptr_t arg, { struct compat_ebt_replace hlp; + if (len < sizeof(hlp)) + return -EINVAL; if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index abb090f94ed2..6482de4d8750 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *)) { int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; - bool mono_delivery_time = skb->mono_delivery_time; + u8 tstamp_type = skb->tstamp_type; unsigned int hlen, ll_rs, mtu; ktime_t tstamp = skb->tstamp; struct ip_frag_state state; @@ -60,19 +60,19 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, struct ip_fraglist_iter iter; struct sk_buff *frag; - if (first_len - hlen > mtu || - skb_headroom(skb) < ll_rs) + if (first_len - hlen > mtu) goto blackhole; - if (skb_cloned(skb)) + if 
(skb_cloned(skb) || + skb_headroom(skb) < ll_rs) goto slow_path; skb_walk_frags(skb, frag) { - if (frag->len > mtu || - skb_headroom(frag) < hlen + ll_rs) + if (frag->len > mtu) goto blackhole; - if (skb_shared(frag)) + if (skb_shared(frag) || + skb_headroom(frag) < hlen + ll_rs) goto slow_path; } @@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, if (iter.frag) ip_fraglist_prepare(skb, &iter); - skb_set_delivery_time(skb, tstamp, mono_delivery_time); + skb_set_delivery_time(skb, tstamp, tstamp_type); err = output(net, sk, data, skb); if (err || !iter.frag) break; @@ -113,7 +113,7 @@ slow_path: goto blackhole; } - skb_set_delivery_time(skb2, tstamp, mono_delivery_time); + skb_set_delivery_time(skb2, tstamp, tstamp_type); err = output(net, sk, data, skb2); if (err) goto blackhole; @@ -291,6 +291,36 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb, return nf_conntrack_in(skb, &bridge_state); } +static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + bool promisc = BR_INPUT_SKB_CB(skb)->promisc; + struct nf_conntrack *nfct = skb_nfct(skb); + struct nf_conn *ct; + + if (promisc) { + nf_reset_ct(skb); + return NF_ACCEPT; + } + + if (!nfct || skb->pkt_type == PACKET_HOST) + return NF_ACCEPT; + + /* nf_conntrack_confirm() cannot handle concurrent clones, + * this happens for broad/multicast frames with e.g. macvlan on top + * of the bridge device. + */ + ct = container_of(nfct, struct nf_conn, ct_general); + if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) + return NF_ACCEPT; + + /* let inet prerouting call conntrack again */ + skb->_nfct = 0; + nf_ct_put(ct); + + return NF_ACCEPT; +} + static void nf_ct_bridge_frag_save(struct sk_buff *skb, struct nf_bridge_frag_data *data) { @@ -386,6 +416,12 @@ static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = { .priority = NF_IP_PRI_CONNTRACK, }, { + .hook = nf_ct_bridge_in, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM, + }, + { .hook = nf_ct_bridge_post, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_POST_ROUTING, diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index bd4d1b4d745f..5adced1e7d0c 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -63,7 +63,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr, return nft_meta_get_eval(expr, regs, pkt); } - strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ); + strscpy_pad((char *)dest, br_dev ? 
br_dev->name : "", IFNAMSIZ); return; err: regs->verdict.code = NFT_BREAK; @@ -142,7 +142,7 @@ static int nft_meta_bridge_set_init(const struct nft_ctx *ctx, } priv->len = len; - err = nft_parse_register_load(tb[NFTA_META_SREG], &priv->sreg, len); + err = nft_parse_register_load(ctx, tb[NFTA_META_SREG], &priv->sreg, len); if (err < 0) return err; @@ -168,8 +168,7 @@ static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track, } static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nft_data **data) + const struct nft_expr *expr) { struct nft_meta *priv = nft_expr_priv(expr); unsigned int hooks; @@ -179,7 +178,7 @@ static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx, hooks = 1 << NF_BR_PRE_ROUTING; break; default: - return nft_meta_set_validate(ctx, expr, data); + return nft_meta_set_validate(ctx, expr); } return nft_chain_validate_hooks(ctx->chain, hooks); diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 71b54fed7263..1cb5c16e97b7 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -170,8 +170,7 @@ out: } static int nft_reject_bridge_validate(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nft_data **data) + const struct nft_expr *expr) { return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_IN)); |
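In the nft_meta_bridge.c hunk above, strncpy() becomes strscpy_pad(), which always NUL-terminates the destination (truncating if necessary) and zero-fills the remainder. The standalone demo below shows the strncpy() pitfall that motivates the change; it uses only the C library, nothing from the kernel tree.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[4];
	const char *src = "bridge0";	/* longer than dst */

	strncpy(dst, src, sizeof(dst));
	/* strncpy() copied 4 bytes and never wrote a terminator: */
	printf("terminated: %s\n",
	       memchr(dst, '\0', sizeof(dst)) ? "yes" : "no");
	return 0;
}

With strscpy_pad() the destination is both terminated and fully zero-padded, which keeps stale bytes out of the fixed-size nft register being filled here.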