Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c')
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2112 |
1 file changed, 1372 insertions(+), 740 deletions(-)
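The diff below replaces the bridge device's linear mids_list lookup with an rhashtable (mdb_ht) keyed on a composite {MAC address, FID} key (struct mlxsw_sp_mdb_entry_key), and converts the bridge-port reference count to refcount_t. As a quick orientation before reading the hunks, here is a minimal standalone sketch of that rhashtable keying pattern; the foo_* names are hypothetical and only the general shape follows the driver code:

#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Composite lookup key: MAC address + filtering ID (FID), mirroring the
 * shape of mlxsw_sp_mdb_entry_key in the patch.
 */
struct foo_mdb_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};

struct foo_mdb_entry {
	struct rhash_head ht_node;	/* linkage into the hash table */
	struct foo_mdb_key key;		/* must match params.key_offset/key_len */
	u16 mid;			/* example payload (PGT/MID index) */
};

static const struct rhashtable_params foo_mdb_ht_params = {
	.key_offset	= offsetof(struct foo_mdb_entry, key),
	.head_offset	= offsetof(struct foo_mdb_entry, ht_node),
	.key_len	= sizeof(struct foo_mdb_key),
};

/* Look up an entry by MAC + FID, creating it if it does not exist yet. */
static struct foo_mdb_entry *
foo_mdb_entry_get(struct rhashtable *ht, const unsigned char *addr, u16 fid)
{
	struct foo_mdb_key key = {};	/* zero-init so the byte-wise key compare is stable */
	struct foo_mdb_entry *entry;
	int err;

	ether_addr_copy(key.addr, addr);
	key.fid = fid;

	entry = rhashtable_lookup_fast(ht, &key, foo_mdb_ht_params);
	if (entry)
		return entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->key = key;

	err = rhashtable_insert_fast(ht, &entry->ht_node, foo_mdb_ht_params);
	if (err) {
		kfree(entry);
		return ERR_PTR(err);
	}
	return entry;
}

The driver's real entry additionally carries a per-entry ports list with per-port refcounts and mrouter state; the sketch only shows the hashing part that the new mdb_ht/mdb_list fields introduce.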
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0f4e68d31cc3..a48bf342084d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -41,13 +41,15 @@ struct mlxsw_sp_bridge { DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; + const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops; }; struct mlxsw_sp_bridge_device { struct net_device *dev; struct list_head list; struct list_head ports_list; - struct list_head mids_list; + struct list_head mdb_list; + struct rhashtable mdb_ht; u8 vlan_enabled:1, multicast_enabled:1, mrouter:1; @@ -59,7 +61,7 @@ struct mlxsw_sp_bridge_port { struct mlxsw_sp_bridge_device *bridge_device; struct list_head list; struct list_head vlans_list; - unsigned int ref_count; + refcount_t ref_count; u8 stp_state; unsigned long flags; bool mrouter; @@ -97,6 +99,37 @@ struct mlxsw_sp_bridge_ops { const struct mlxsw_sp_fid *fid); }; +struct mlxsw_sp_switchdev_ops { + void (*init)(struct mlxsw_sp *mlxsw_sp); +}; + +struct mlxsw_sp_mdb_entry_key { + unsigned char addr[ETH_ALEN]; + u16 fid; +}; + +struct mlxsw_sp_mdb_entry { + struct list_head list; + struct rhash_head ht_node; + struct mlxsw_sp_mdb_entry_key key; + u16 mid; + struct list_head ports_list; + u16 ports_count; +}; + +struct mlxsw_sp_mdb_entry_port { + struct list_head list; /* Member of 'ports_list'. */ + u16 local_port; + refcount_t refcount; + bool mrouter; +}; + +static const struct rhashtable_params mlxsw_sp_mdb_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key), + .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node), + .key_len = sizeof(struct mlxsw_sp_mdb_entry_key), +}; + static int mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, @@ -104,12 +137,13 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_bridge_port *bridge_port); + struct mlxsw_sp_bridge_port *bridge_port, + u16 fid_index); -static void -mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, +static int +mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_device - *bridge_device); + *bridge_device, bool mc_enabled); static void mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, @@ -136,9 +170,9 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, - void *data) + struct netdev_nested_priv *priv) { - struct mlxsw_sp *mlxsw_sp = data; + struct mlxsw_sp *mlxsw_sp = priv->data; mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); return 0; @@ -147,22 +181,84 @@ static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev, static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp, struct net_device *dev) { + struct netdev_nested_priv priv = { + .data = (void *)mlxsw_sp, + }; + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev); netdev_walk_all_upper_dev_rcu(dev, mlxsw_sp_bridge_device_upper_rif_destroy, - mlxsw_sp); + &priv); +} + +static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge, + struct net_device *br_dev, + struct netlink_ext_ack *extack) +{ + struct net_device *dev, *stop_dev; + struct list_head 
*iter; + int err; + + netdev_for_each_lower_dev(br_dev, dev, iter) { + if (netif_is_vxlan(dev) && netif_running(dev)) { + err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp, + br_dev, dev, 0, + extack); + if (err) { + stop_dev = dev; + goto err_vxlan_join; + } + } + } + + return 0; + +err_vxlan_join: + netdev_for_each_lower_dev(br_dev, dev, iter) { + if (netif_is_vxlan(dev) && netif_running(dev)) { + if (stop_dev == dev) + break; + mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev); + } + } + return err; +} + +static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge, + struct net_device *br_dev) +{ + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(br_dev, dev, iter) { + if (netif_is_vxlan(dev) && netif_running(dev)) + mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev); + } +} + +static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp, + bool no_delay) +{ + struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; + unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval; + + mlxsw_core_schedule_dw(&bridge->fdb_notify.dw, + msecs_to_jiffies(interval)); } static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, - struct net_device *br_dev) + struct net_device *br_dev, + struct netlink_ext_ack *extack) { struct device *dev = bridge->mlxsw_sp->bus_info->dev; struct mlxsw_sp_bridge_device *bridge_device; bool vlan_enabled = br_vlan_enabled(br_dev); + int err; if (vlan_enabled && bridge->vlan_enabled_exists) { dev_err(dev, "Only one VLAN-aware bridge is supported\n"); + NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported"); return ERR_PTR(-EINVAL); } @@ -170,40 +266,75 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, if (!bridge_device) return ERR_PTR(-ENOMEM); + err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params); + if (err) + goto err_mdb_rhashtable_init; + bridge_device->dev = br_dev; bridge_device->vlan_enabled = vlan_enabled; bridge_device->multicast_enabled = br_multicast_enabled(br_dev); bridge_device->mrouter = br_multicast_router(br_dev); INIT_LIST_HEAD(&bridge_device->ports_list); if (vlan_enabled) { + u16 proto; + bridge->vlan_enabled_exists = true; - bridge_device->ops = bridge->bridge_8021q_ops; + br_vlan_get_proto(br_dev, &proto); + if (proto == ETH_P_8021AD) + bridge_device->ops = bridge->bridge_8021ad_ops; + else + bridge_device->ops = bridge->bridge_8021q_ops; } else { bridge_device->ops = bridge->bridge_8021d_ops; } - INIT_LIST_HEAD(&bridge_device->mids_list); + INIT_LIST_HEAD(&bridge_device->mdb_list); + + if (list_empty(&bridge->bridges_list)) + mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false); list_add(&bridge_device->list, &bridge->bridges_list); + /* It is possible we already have VXLAN devices enslaved to the bridge. + * In which case, we need to replay their configuration as if they were + * just now enslaved to the bridge. 
+ */ + err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack); + if (err) + goto err_vxlan_init; + return bridge_device; + +err_vxlan_init: + list_del(&bridge_device->list); + if (bridge_device->vlan_enabled) + bridge->vlan_enabled_exists = false; + rhashtable_destroy(&bridge_device->mdb_ht); +err_mdb_rhashtable_init: + kfree(bridge_device); + return ERR_PTR(err); } static void mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, struct mlxsw_sp_bridge_device *bridge_device) { + mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev); mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, bridge_device->dev); list_del(&bridge_device->list); + if (list_empty(&bridge->bridges_list)) + cancel_delayed_work(&bridge->fdb_notify.dw); if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; WARN_ON(!list_empty(&bridge_device->ports_list)); - WARN_ON(!list_empty(&bridge_device->mids_list)); + WARN_ON(!list_empty(&bridge_device->mdb_list)); + rhashtable_destroy(&bridge_device->mdb_ht); kfree(bridge_device); } static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge, - struct net_device *br_dev) + struct net_device *br_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_bridge_device *bridge_device; @@ -211,7 +342,7 @@ mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge, if (bridge_device) return bridge_device; - return mlxsw_sp_bridge_device_create(bridge, br_dev); + return mlxsw_sp_bridge_device_create(bridge, br_dev, extack); } static void @@ -253,16 +384,103 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev); } +static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack); +static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj); + +struct mlxsw_sp_bridge_port_replay_switchdev_objs { + struct net_device *brport_dev; + struct mlxsw_sp_port *mlxsw_sp_port; + int done; +}; + +static int +mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_port_obj_info *port_obj_info = ptr; + struct netlink_ext_ack *extack = port_obj_info->info.extack; + struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso; + int err = 0; + + rso = (void *)port_obj_info->info.ctx; + + if (event != SWITCHDEV_PORT_OBJ_ADD || + dev != rso->brport_dev) + goto out; + + /* When a port is joining the bridge through a LAG, there likely are + * VLANs configured on that LAG already. The replay will thus attempt to + * have the given port-vlans join the corresponding FIDs. But the LAG + * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN + * memberships, back before CHANGEUPPER was distributed and netdevice + * master set. So now before propagating the VLAN events further, we + * first need to kill the corresponding VID at the mlxsw_sp_port. + * + * Note that this doesn't need to be rolled back on failure -- if the + * replay fails, the enslavement is off, and the VIDs would be killed by + * LAG anyway as part of its rollback. 
+ */ + if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) { + u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid; + + err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid); + if (err) + goto out; + } + + ++rso->done; + err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL, + port_obj_info->obj, extack); + +out: + return notifier_from_errno(err); +} + +static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = { + .notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs, +}; + +static int +mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_port_obj_info *port_obj_info = ptr; + struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso; + + rso = (void *)port_obj_info->info.ctx; + + if (event != SWITCHDEV_PORT_OBJ_ADD || + dev != rso->brport_dev) + return NOTIFY_DONE; + if (!rso->done--) + return NOTIFY_STOP; + + mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL, + port_obj_info->obj); + return NOTIFY_DONE; +} + +static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = { + .notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs, +}; + static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, - struct net_device *brport_dev) + struct net_device *brport_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_port *mlxsw_sp_port; + int err; bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL); if (!bridge_port) - return NULL; + return ERR_PTR(-ENOMEM); mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev); bridge_port->lagged = mlxsw_sp_port->lagged; @@ -272,19 +490,30 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, bridge_port->system_port = mlxsw_sp_port->local_port; bridge_port->dev = brport_dev; bridge_port->bridge_device = bridge_device; - bridge_port->stp_state = BR_STATE_DISABLED; + bridge_port->stp_state = br_port_get_stp_state(brport_dev); bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | BR_MCAST_FLOOD; INIT_LIST_HEAD(&bridge_port->vlans_list); list_add(&bridge_port->list, &bridge_device->ports_list); - bridge_port->ref_count = 1; + refcount_set(&bridge_port->ref_count, 1); + + err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev, + NULL, NULL, NULL, false, extack); + if (err) + goto err_switchdev_offload; return bridge_port; + +err_switchdev_offload: + list_del(&bridge_port->list); + kfree(bridge_port); + return ERR_PTR(err); } static void mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) { + switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL); list_del(&bridge_port->list); WARN_ON(!list_empty(&bridge_port->vlans_list)); kfree(bridge_port); @@ -292,7 +521,8 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, - struct net_device *brport_dev) + struct net_device *brport_dev, + struct netlink_ext_ack *extack) { struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev); struct mlxsw_sp_bridge_device *bridge_device; @@ -301,17 +531,18 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev); if (bridge_port) { - bridge_port->ref_count++; + refcount_inc(&bridge_port->ref_count); return 
bridge_port; } - bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev); + bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack); if (IS_ERR(bridge_device)) return ERR_CAST(bridge_device); - bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev); - if (!bridge_port) { - err = -ENOMEM; + bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev, + extack); + if (IS_ERR(bridge_port)) { + err = PTR_ERR(bridge_port); goto err_bridge_port_create; } @@ -327,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge, { struct mlxsw_sp_bridge_device *bridge_device; - if (--bridge_port->ref_count != 0) + if (!refcount_dec_and_test(&bridge_port->ref_count)) return; bridge_device = bridge_port->bridge_device; mlxsw_sp_bridge_port_destroy(bridge_port); @@ -431,46 +662,6 @@ static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan) mlxsw_sp_bridge_vlan_destroy(bridge_vlan); } -static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge, - struct net_device *dev, - unsigned long *brport_flags) -{ - struct mlxsw_sp_bridge_port *bridge_port; - - bridge_port = mlxsw_sp_bridge_port_find(bridge, dev); - if (WARN_ON(!bridge_port)) - return; - - memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags)); -} - -static int mlxsw_sp_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); - memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac, - attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev, - &attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD | - BR_MCAST_FLOOD; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_vlan *bridge_vlan, @@ -490,7 +681,6 @@ mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, struct net_device *orig_dev, u8 state) { @@ -498,9 +688,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_vlan *bridge_vlan; int err; - if (switchdev_trans_ph_prepare(trans)) - return 0; - /* It's possible we failed to enslave the port, yet this * operation is executed due to it being deferred. 
*/ @@ -578,6 +765,64 @@ err_port_bridge_vlan_flood_set: } static int +mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan, + enum mlxsw_sp_flood_type packet_type, + bool member) +{ + struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + int err; + + list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list, + bridge_vlan_node) { + u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port; + + err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, + packet_type, local_port, member); + if (err) + goto err_fid_flood_set; + } + + return 0; + +err_fid_flood_set: + list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan, + &bridge_vlan->port_vlan_list, + list) { + u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port; + + mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type, + local_port, !member); + } + + return err; +} + +static int +mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port, + enum mlxsw_sp_flood_type packet_type, + bool member) +{ + struct mlxsw_sp_bridge_vlan *bridge_vlan; + int err; + + list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) { + err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type, + member); + if (err) + goto err_bridge_vlans_flood_set; + } + + return 0; + +err_bridge_vlans_flood_set: + list_for_each_entry_continue_reverse(bridge_vlan, + &bridge_port->vlans_list, list) + mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type, + !member); + return err; +} + +static int mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_vlan *bridge_vlan, bool set) @@ -620,45 +865,81 @@ err_port_bridge_vlan_learning_set: return err; } +static int +mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, + const struct net_device *orig_dev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | + BR_PORT_LOCKED | BR_PORT_MAB)) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported bridge port flag"); + return -EINVAL; + } + + if ((flags.mask & BR_PORT_LOCKED) && is_vlan_dev(orig_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a VLAN upper"); + return -EINVAL; + } + + if ((flags.mask & BR_PORT_LOCKED) && vlan_uses_dev(orig_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a bridge port that has VLAN uppers"); + return -EINVAL; + } + + return 0; +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, struct net_device *orig_dev, - unsigned long brport_flags) + struct switchdev_brport_flags flags) { struct mlxsw_sp_bridge_port *bridge_port; int err; - if (switchdev_trans_ph_prepare(trans)) - return 0; - bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, orig_dev); if (!bridge_port) return 0; - err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, - MLXSW_SP_FLOOD_TYPE_UC, - brport_flags & BR_FLOOD); - if (err) - return err; + if (flags.mask & BR_FLOOD) { + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, + bridge_port, + MLXSW_SP_FLOOD_TYPE_UC, + flags.val & BR_FLOOD); + if (err) + return err; + } - err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port, - brport_flags & BR_LEARNING); - if (err) - return err; + if (flags.mask & BR_LEARNING) { + err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, + bridge_port, + flags.val & BR_LEARNING); + if (err) + return err; + } + + if (flags.mask & 
BR_PORT_LOCKED) { + err = mlxsw_sp_port_security_set(mlxsw_sp_port, + flags.val & BR_PORT_LOCKED); + if (err) + return err; + } if (bridge_port->bridge_device->multicast_enabled) goto out; - err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, - MLXSW_SP_FLOOD_TYPE_MC, - brport_flags & - BR_MCAST_FLOOD); - if (err) - return err; + if (flags.mask & BR_MCAST_FLOOD) { + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, + bridge_port, + MLXSW_SP_FLOOD_TYPE_MC, + flags.val & BR_MCAST_FLOOD); + if (err) + return err; + } out: - memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); + memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val)); return 0; } @@ -676,35 +957,26 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) } static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, unsigned long ageing_clock_t) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; - if (switchdev_trans_ph_prepare(trans)) { - if (ageing_time < MLXSW_SP_MIN_AGEING_TIME || - ageing_time > MLXSW_SP_MAX_AGEING_TIME) - return -ERANGE; - else - return 0; - } + if (ageing_time < MLXSW_SP_MIN_AGEING_TIME || + ageing_time > MLXSW_SP_MAX_AGEING_TIME) + return -ERANGE; return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, struct net_device *orig_dev, bool vlan_enabled) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_bridge_device *bridge_device; - if (!switchdev_trans_ph_prepare(trans)) - return 0; - bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); if (WARN_ON(!bridge_device)) return -EINVAL; @@ -716,22 +988,36 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } +static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *orig_dev, + u16 vlan_proto) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_device)) + return -EINVAL; + + netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n"); + return -EINVAL; +} + static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, struct net_device *orig_dev, bool is_port_mrouter) { struct mlxsw_sp_bridge_port *bridge_port; int err; - if (switchdev_trans_ph_prepare(trans)) - return 0; - bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, orig_dev); if (!bridge_port) return 0; + mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port, + is_port_mrouter); + if (!bridge_port->bridge_device->multicast_enabled) goto out; @@ -741,8 +1027,6 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; - mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port, - is_port_mrouter); out: bridge_port->mrouter = is_port_mrouter; return 0; @@ -758,18 +1042,15 @@ static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) } static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, struct net_device *orig_dev, bool mc_disabled) { + enum 
mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_bridge_device *bridge_device; struct mlxsw_sp_bridge_port *bridge_port; int err; - if (switchdev_trans_ph_prepare(trans)) - return 0; - /* It's possible we failed to enslave the port, yet this * operation is executed due to it being deferred. */ @@ -777,43 +1058,184 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_device) return 0; - if (bridge_device->multicast_enabled != !mc_disabled) { - bridge_device->multicast_enabled = !mc_disabled; - mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port, - bridge_device); - } + if (bridge_device->multicast_enabled == !mc_disabled) + return 0; + + bridge_device->multicast_enabled = !mc_disabled; + err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device, + !mc_disabled); + if (err) + goto err_mc_enable_sync; list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { - enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC; bool member = mlxsw_sp_mc_flood(bridge_port); - err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, - bridge_port, - packet_type, member); + err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port, + packet_type, + member); if (err) - return err; + goto err_flood_table_set; } - bridge_device->multicast_enabled = !mc_disabled; - return 0; + +err_flood_table_set: + list_for_each_entry_continue_reverse(bridge_port, + &bridge_device->ports_list, list) { + bool member = mlxsw_sp_mc_flood(bridge_port); + + mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type, + !member); + } + mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device, + mc_disabled); +err_mc_enable_sync: + bridge_device->multicast_enabled = mc_disabled; + return err; } -static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp, - u16 mid_idx, bool add) +static struct mlxsw_sp_mdb_entry_port * +mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry, + u16 local_port) { - char *smid_pl; + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + + list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) { + if (mdb_entry_port->local_port == local_port) + return mdb_entry_port; + } + + return NULL; +} + +static struct mlxsw_sp_mdb_entry_port * +mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mdb_entry *mdb_entry, + u16 local_port) +{ + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; int err; - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) - return -ENOMEM; + mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port); + if (mdb_entry_port) { + if (mdb_entry_port->mrouter && + refcount_read(&mdb_entry_port->refcount) == 1) + mdb_entry->ports_count++; - mlxsw_reg_smid_pack(smid_pl, mid_idx, - mlxsw_sp_router_port(mlxsw_sp), add); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); - return err; + refcount_inc(&mdb_entry_port->refcount); + return mdb_entry_port; + } + + err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid, + mdb_entry->key.fid, local_port, true); + if (err) + return ERR_PTR(err); + + mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL); + if (!mdb_entry_port) { + err = -ENOMEM; + goto err_mdb_entry_port_alloc; + } + + mdb_entry_port->local_port = local_port; + refcount_set(&mdb_entry_port->refcount, 1); + list_add(&mdb_entry_port->list, &mdb_entry->ports_list); + mdb_entry->ports_count++; + + return mdb_entry_port; + 
+err_mdb_entry_port_alloc: + mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid, + mdb_entry->key.fid, local_port, false); + return ERR_PTR(err); +} + +static void +mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mdb_entry *mdb_entry, + u16 local_port, bool force) +{ + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + + mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port); + if (!mdb_entry_port) + return; + + if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) { + if (mdb_entry_port->mrouter && + refcount_read(&mdb_entry_port->refcount) == 1) + mdb_entry->ports_count--; + return; + } + + mdb_entry->ports_count--; + list_del(&mdb_entry_port->list); + kfree(mdb_entry_port); + mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid, + mdb_entry->key.fid, local_port, false); +} + +static __always_unused struct mlxsw_sp_mdb_entry_port * +mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mdb_entry *mdb_entry, + u16 local_port) +{ + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + int err; + + mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port); + if (mdb_entry_port) { + if (!mdb_entry_port->mrouter) + refcount_inc(&mdb_entry_port->refcount); + return mdb_entry_port; + } + + err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid, + mdb_entry->key.fid, local_port, true); + if (err) + return ERR_PTR(err); + + mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL); + if (!mdb_entry_port) { + err = -ENOMEM; + goto err_mdb_entry_port_alloc; + } + + mdb_entry_port->local_port = local_port; + refcount_set(&mdb_entry_port->refcount, 1); + mdb_entry_port->mrouter = true; + list_add(&mdb_entry_port->list, &mdb_entry->ports_list); + + return mdb_entry_port; + +err_mdb_entry_port_alloc: + mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid, + mdb_entry->key.fid, local_port, false); + return ERR_PTR(err); +} + +static __always_unused void +mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mdb_entry *mdb_entry, + u16 local_port) +{ + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + + mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port); + if (!mdb_entry_port) + return; + + if (!mdb_entry_port->mrouter) + return; + + mdb_entry_port->mrouter = false; + if (!refcount_dec_and_test(&mdb_entry_port->refcount)) + return; + + list_del(&mdb_entry_port->list); + kfree(mdb_entry_port); + mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid, + mdb_entry->key.fid, local_port, false); } static void @@ -821,24 +1243,27 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_device *bridge_device, bool add) { - struct mlxsw_sp_mid *mid; + u16 local_port = mlxsw_sp_router_port(mlxsw_sp); + struct mlxsw_sp_mdb_entry *mdb_entry; - list_for_each_entry(mid, &bridge_device->mids_list, list) - mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add); + list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) { + if (add) + mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry, + local_port); + else + mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, + local_port); + } } static int mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, struct net_device *orig_dev, bool is_mrouter) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_bridge_device *bridge_device; - if (switchdev_trans_ph_prepare(trans)) - return 0; - /* It's possible we failed to enslave the 
port, yet this * operation is executed due to it being deferred. */ @@ -853,45 +1278,56 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } -static int mlxsw_sp_port_attr_set(struct net_device *dev, +static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx, const struct switchdev_attr *attr, - struct switchdev_trans *trans) + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, attr->orig_dev, attr->u.stp_state); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port, + attr->orig_dev, + attr->u.brport_flags, + extack); + break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, attr->orig_dev, attr->u.brport_flags); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, attr->u.ageing_time); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, attr->orig_dev, attr->u.vlan_filtering); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL: + err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, + attr->orig_dev, + attr->u.vlan_protocol); + break; case SWITCHDEV_ATTR_ID_PORT_MROUTER: - err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, attr->orig_dev, attr->u.mrouter); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: - err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, attr->orig_dev, attr->u.mc_disabled); break; case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER: - err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans, + err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, attr->orig_dev, attr->u.mrouter); break; @@ -900,8 +1336,7 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, break; } - if (switchdev_trans_ph_commit(trans)) - mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); return err; } @@ -913,7 +1348,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_bridge_device *bridge_device; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u16 vid = mlxsw_sp_port_vlan->vid; struct mlxsw_sp_fid *fid; int err; @@ -962,7 +1397,7 @@ mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; u16 vid = mlxsw_sp_port_vlan->vid; mlxsw_sp_port_vlan->fid = NULL; @@ -1024,7 +1459,7 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list); mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge, - bridge_port->dev); + bridge_port->dev, extack); mlxsw_sp_port_vlan->bridge_port = bridge_port; return 0; @@ -1046,14 +1481,13 @@ 
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) struct mlxsw_sp_bridge_vlan *bridge_vlan; struct mlxsw_sp_bridge_port *bridge_port; u16 vid = mlxsw_sp_port_vlan->vid; - bool last_port, last_vlan; + bool last_port; if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q && mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D)) return; bridge_port = mlxsw_sp_port_vlan->bridge_port; - last_vlan = list_is_singular(&bridge_port->vlans_list); bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); last_port = list_is_singular(&bridge_vlan->port_vlan_list); @@ -1065,8 +1499,9 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp, bridge_port, mlxsw_sp_fid_index(fid)); - if (last_vlan) - mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port); + + mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port, + mlxsw_sp_fid_index(fid)); mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); @@ -1083,6 +1518,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; u16 old_pvid = mlxsw_sp_port->pvid; + u16 proto; int err; /* The only valid scenario in which a port-vlan already exists, is if @@ -1106,7 +1542,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_port_vlan_set; - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); + br_vlan_get_proto(bridge_port->bridge_device->dev, &proto); + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto); if (err) goto err_port_pvid_set; @@ -1118,7 +1555,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; err_port_vlan_bridge_join: - mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto); err_port_pvid_set: mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); err_port_vlan_set: @@ -1127,41 +1564,19 @@ err_port_vlan_set: } static int -mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, - const struct switchdev_obj_port_vlan *vlan) +mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { - struct mlxsw_sp_rif *rif; - struct mlxsw_sp_fid *fid; - u16 pvid; - u16 vid; - - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev); - if (!rif) - return 0; - fid = mlxsw_sp_rif_fid(rif); - pvid = mlxsw_sp_fid_8021q_vid(fid); - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - if (vlan->flags & BRIDGE_VLAN_INFO_PVID) { - if (vid != pvid) { - netdev_err(br_dev, "Can't change PVID, it's used by router interface\n"); - return -EBUSY; - } - } else { - if (vid == pvid) { - netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n"); - return -EBUSY; - } - } - } + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - return 0; + return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid, + flag_pvid, extack); } static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans, struct netlink_ext_ack *extack) { bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; @@ -1169,24 +1584,18 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *orig_dev = 
vlan->obj.orig_dev; struct mlxsw_sp_bridge_port *bridge_port; - u16 vid; if (netif_is_bridge_master(orig_dev)) { int err = 0; - if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) && - br_vlan_enabled(orig_dev) && - switchdev_trans_ph_prepare(trans)) - err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp, - orig_dev, vlan); + if (br_vlan_enabled(orig_dev)) + err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev, + vlan, extack); if (!err) err = -EOPNOTSUPP; return err; } - if (switchdev_trans_ph_commit(trans)) - return 0; - bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (WARN_ON(!bridge_port)) return -EINVAL; @@ -1194,17 +1603,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_port->bridge_device->vlan_enabled) return 0; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - int err; - - err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, - vid, flag_untagged, - flag_pvid, extack); - if (err) - return err; - } - - return 0; + return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, + vlan->vid, flag_untagged, + flag_pvid, extack); } static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged) @@ -1242,38 +1643,52 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, - const char *mac, u16 fid, - enum mlxsw_sp_l3proto proto, - const union mlxsw_sp_l3addr *addr, - bool adding, bool dynamic) +static int +mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic, + const char *mac, u16 fid, __be32 addr, bool adding) { - enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto; char *sfd_pl; u8 num_rec; u32 uip; int err; - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - uip = be32_to_cpu(addr->addr4); - sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4; - break; - case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ - default: - WARN_ON(1); - return -EOPNOTSUPP; - } + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + uip = be32_to_cpu(addr); + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0, + mlxsw_sp_sfd_rec_policy(dynamic), mac, + fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip); + num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + if (err) + goto out; + + if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) + err = -EBUSY; + +out: + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + u32 kvdl_index, bool adding) +{ + char *sfd_pl; + u8 num_rec; + int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0, - mlxsw_sp_sfd_rec_policy(dynamic), mac, fid, - MLXSW_REG_SFD_REC_ACTION_NOP, uip, - sfd_proto); + mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1287,8 +1702,82 @@ out: return err; } -static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, - const char *mac, u16 fid, bool adding, +static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + const struct in6_addr *addr) +{ + u32 kvdl_index; + int err; + + err = 
mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index); + if (err) + return err; + + err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, + kvdl_index, true); + if (err) + goto err_sfd_write; + + err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr); + if (err) + /* Replace can fail only for creating new mapping, so removing + * the FDB entry in the error path is OK. + */ + goto err_addr_replace; + + return 0; + +err_addr_replace: + mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index, + false); +err_sfd_write: + mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr); + return err; +} + +static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + const struct in6_addr *addr) +{ + mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid); + mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false); + mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr); +} + +static int +mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac, + u16 fid, const struct in6_addr *addr, bool adding) +{ + if (adding) + return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid, + addr); + + mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr); + return 0; +} + +static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp, + const char *mac, u16 fid, + enum mlxsw_sp_l3proto proto, + const union mlxsw_sp_l3addr *addr, + bool adding, bool dynamic) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid, + addr->addr4, adding); + case MLXSW_SP_L3_PROTO_IPV6: + return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid, + &addr->addr6, adding); + default: + WARN_ON(1); + return -EOPNOTSUPP; + } +} + +static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port, + const char *mac, u16 fid, u16 vid, + bool adding, enum mlxsw_reg_sfd_rec_action action, enum mlxsw_reg_sfd_rec_policy policy) { @@ -1301,7 +1790,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action, + local_port); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1315,19 +1805,19 @@ out: return err; } -static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, - const char *mac, u16 fid, bool adding, - bool dynamic) +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port, + const char *mac, u16 fid, u16 vid, + bool adding, bool dynamic) { - return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, - MLXSW_REG_SFD_REC_ACTION_NOP, + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid, + adding, MLXSW_REG_SFD_REC_ACTION_NOP, mlxsw_sp_sfd_rec_policy(dynamic)); } int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, bool adding) { - return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding, MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY); } @@ -1389,7 +1879,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_port->lagged) return mlxsw_sp_port_fdb_uc_op(mlxsw_sp, bridge_port->system_port, - fdb_info->addr, fid_index, + fdb_info->addr, fid_index, 
vid, adding, false); else return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, @@ -1398,8 +1888,9 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port, vid, adding, false); } -static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, - u16 fid, u16 mid_idx, bool adding) +static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_mdb_entry *mdb_entry, + bool adding) { char *sfd_pl; u8 num_rec; @@ -1410,8 +1901,9 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, return -ENOMEM; mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); - mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, - MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); + mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr, + mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP, + mdb_entry->mid); num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); if (err) @@ -1425,79 +1917,17 @@ out: return err; } -static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, - long *ports_bitmap, - bool set_router_port) -{ - char *smid_pl; - int err, i; - - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) - return -ENOMEM; - - mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { - if (mlxsw_sp->ports[i]) - mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); - } - - mlxsw_reg_smid_port_mask_set(smid_pl, - mlxsw_sp_router_port(mlxsw_sp), 1); - - for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) - mlxsw_reg_smid_port_set(smid_pl, i, 1); - - mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), - set_router_port); - - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); - return err; -} - -static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 mid_idx, bool add) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *smid_pl; - int err; - - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) - return -ENOMEM; - - mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); - return err; -} - -static struct -mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, - const unsigned char *addr, - u16 fid) -{ - struct mlxsw_sp_mid *mid; - - list_for_each_entry(mid, &bridge_device->mids_list, list) { - if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) - return mid; - } - return NULL; -} - static void mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, - unsigned long *ports_bitmap) + struct mlxsw_sp_ports_bitmap *ports_bm) { struct mlxsw_sp_port *mlxsw_sp_port; u64 max_lag_members, i; int lag_id; if (!bridge_port->lagged) { - set_bit(bridge_port->system_port, ports_bitmap); + set_bit(bridge_port->system_port, ports_bm->bitmap); } else { max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG_MEMBERS); @@ -1507,13 +1937,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, lag_id, i); if (mlxsw_sp_port) set_bit(mlxsw_sp_port->local_port, - ports_bitmap); + ports_bm->bitmap); } } } static void -mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, +mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm, struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp *mlxsw_sp) { @@ -1523,139 +1953,238 @@ 
mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, if (bridge_port->mrouter) { mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp, bridge_port, - flood_bitmap); + flood_bm); } } } -static bool -mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mid *mid, - struct mlxsw_sp_bridge_device *bridge_device) -{ - long *flood_bitmap; - int num_of_ports; - int alloc_size; - u16 mid_idx; - int err; - - mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, - MLXSW_SP_MID_MAX); - if (mid_idx == MLXSW_SP_MID_MAX) - return false; +static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ports_bitmap *ports_bm, + struct mlxsw_sp_mdb_entry *mdb_entry) +{ + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + unsigned int nbits = ports_bm->nbits; + int i; - num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); - alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); - flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); - if (!flood_bitmap) - return false; + for_each_set_bit(i, ports_bm->bitmap, nbits) { + mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, + mdb_entry, + i); + if (IS_ERR(mdb_entry_port)) { + nbits = i; + goto err_mrouter_port_get; + } + } - bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); - mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); + return 0; - mid->mid = mid_idx; - err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, - bridge_device->mrouter); - kfree(flood_bitmap); - if (err) - return false; +err_mrouter_port_get: + for_each_set_bit(i, ports_bm->bitmap, nbits) + mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i); + return PTR_ERR(mdb_entry_port); +} - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx, - true); - if (err) - return false; +static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ports_bitmap *ports_bm, + struct mlxsw_sp_mdb_entry *mdb_entry) +{ + int i; - set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); - mid->in_hw = true; - return true; + for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits) + mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i); } -static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mid *mid) +static int +mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_mdb_entry *mdb_entry, bool add) { - if (!mid->in_hw) - return 0; + struct mlxsw_sp_ports_bitmap ports_bm; + int err; - clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); - mid->in_hw = false; - return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid, - false); + err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm); + if (err) + return err; + + mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp); + + if (add) + err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm, + mdb_entry); + else + mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry); + + mlxsw_sp_port_bitmap_fini(&ports_bm); + return err; } -static struct -mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_bridge_device *bridge_device, - const unsigned char *addr, - u16 fid) +static struct mlxsw_sp_mdb_entry * +mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + const unsigned char *addr, u16 fid, u16 local_port) { - struct mlxsw_sp_mid *mid; - size_t alloc_size; + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + struct mlxsw_sp_mdb_entry 
*mdb_entry; + int err; - mid = kzalloc(sizeof(*mid), GFP_KERNEL); - if (!mid) - return NULL; + mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL); + if (!mdb_entry) + return ERR_PTR(-ENOMEM); - alloc_size = sizeof(unsigned long) * - BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); + ether_addr_copy(mdb_entry->key.addr, addr); + mdb_entry->key.fid = fid; + err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid); + if (err) + goto err_pgt_mid_alloc; - mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); - if (!mid->ports_in_mid) - goto err_ports_in_mid_alloc; + INIT_LIST_HEAD(&mdb_entry->ports_list); - ether_addr_copy(mid->addr, addr); - mid->fid = fid; - mid->in_hw = false; + err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, + true); + if (err) + goto err_mdb_mrouters_set; - if (!bridge_device->multicast_enabled) - goto out; + mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry, + local_port); + if (IS_ERR(mdb_entry_port)) { + err = PTR_ERR(mdb_entry_port); + goto err_mdb_entry_port_get; + } - if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device)) - goto err_write_mdb_entry; + if (bridge_device->multicast_enabled) { + err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true); + if (err) + goto err_mdb_entry_write; + } -out: - list_add_tail(&mid->list, &bridge_device->mids_list); - return mid; + err = rhashtable_insert_fast(&bridge_device->mdb_ht, + &mdb_entry->ht_node, + mlxsw_sp_mdb_ht_params); + if (err) + goto err_rhashtable_insert; + + list_add_tail(&mdb_entry->list, &bridge_device->mdb_list); + + return mdb_entry; + +err_rhashtable_insert: + if (bridge_device->multicast_enabled) + mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false); +err_mdb_entry_write: + mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false); +err_mdb_entry_port_get: + mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false); +err_mdb_mrouters_set: + mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid); +err_pgt_mid_alloc: + kfree(mdb_entry); + return ERR_PTR(err); +} -err_write_mdb_entry: - kfree(mid->ports_in_mid); -err_ports_in_mid_alloc: - kfree(mid); - return NULL; +static void +mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mdb_entry *mdb_entry, + struct mlxsw_sp_bridge_device *bridge_device, + u16 local_port, bool force) +{ + list_del(&mdb_entry->list); + rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node, + mlxsw_sp_mdb_ht_params); + if (bridge_device->multicast_enabled) + mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false); + mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force); + mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false); + WARN_ON(!list_empty(&mdb_entry->ports_list)); + mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid); + kfree(mdb_entry); +} + +static struct mlxsw_sp_mdb_entry * +mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + const unsigned char *addr, u16 fid, u16 local_port) +{ + struct mlxsw_sp_mdb_entry_key key = {}; + struct mlxsw_sp_mdb_entry *mdb_entry; + + ether_addr_copy(key.addr, addr); + key.fid = fid; + mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key, + mlxsw_sp_mdb_ht_params); + if (mdb_entry) { + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + + mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, + mdb_entry, + local_port); + if (IS_ERR(mdb_entry_port)) + return ERR_CAST(mdb_entry_port); + + return mdb_entry; + } + + return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, 
bridge_device, addr, fid, + local_port); } -static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mid *mid) +static bool +mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry, + struct mlxsw_sp_mdb_entry_port *removed_entry_port, + bool force) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - int err = 0; + if (mdb_entry->ports_count > 1) + return false; - clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); - if (bitmap_empty(mid->ports_in_mid, - mlxsw_core_max_ports(mlxsw_sp->core))) { - err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); - list_del(&mid->list); - kfree(mid->ports_in_mid); - kfree(mid); - } - return err; + if (force) + return true; + + if (!removed_entry_port->mrouter && + refcount_read(&removed_entry_port->refcount) > 1) + return false; + + if (removed_entry_port->mrouter && + refcount_read(&removed_entry_port->refcount) > 2) + return false; + + return true; +} + +static void +mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port, + bool force) +{ + struct mlxsw_sp_mdb_entry_port *mdb_entry_port; + + mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port); + if (!mdb_entry_port) + return; + + /* Avoid a temporary situation in which the MDB entry points to an empty + * PGT entry, as otherwise packets will be temporarily dropped instead + * of being flooded. Instead, in this situation, call + * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and + * then releases the PGT entry. + */ + if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force)) + mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device, + local_port, force); + else + mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, + force); } static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_mdb *mdb) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *orig_dev = mdb->obj.orig_dev; struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - struct net_device *dev = mlxsw_sp_port->dev; struct mlxsw_sp_bridge_device *bridge_device; struct mlxsw_sp_bridge_port *bridge_port; - struct mlxsw_sp_mid *mid; + struct mlxsw_sp_mdb_entry *mdb_entry; u16 fid_index; - int err = 0; - - if (switchdev_trans_ph_prepare(trans)) - return 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (!bridge_port) @@ -1670,54 +2199,35 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); - mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); - if (!mid) { - mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr, - fid_index); - if (!mid) { - netdev_err(dev, "Unable to allocate MC group\n"); - return -ENOMEM; - } - } - set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); - - if (!bridge_device->multicast_enabled) - return 0; - - if (bridge_port->mrouter) - return 0; - - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); - if (err) { - netdev_err(dev, "Unable to set SMID\n"); - goto err_out; - } + mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device, + mdb->addr, fid_index, + mlxsw_sp_port->local_port); + if (IS_ERR(mdb_entry)) + return PTR_ERR(mdb_entry); return 0; - -err_out: - mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); - return err; } -static void 
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_bridge_device - *bridge_device) +static int +mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + bool mc_enabled) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_mid *mid; - bool mc_enabled; - - mc_enabled = bridge_device->multicast_enabled; + struct mlxsw_sp_mdb_entry *mdb_entry; + int err; - list_for_each_entry(mid, &bridge_device->mids_list, list) { - if (mc_enabled) - mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, - bridge_device); - else - mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); + list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) { + err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled); + if (err) + goto err_mdb_entry_write; } + return 0; + +err_mdb_entry_write: + list_for_each_entry_continue_reverse(mdb_entry, + &bridge_device->mdb_list, list) + mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled); + return err; } static void @@ -1725,50 +2235,25 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port, bool add) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_bridge_device *bridge_device; - struct mlxsw_sp_mid *mid; + u16 local_port = mlxsw_sp_port->local_port; + struct mlxsw_sp_mdb_entry *mdb_entry; bridge_device = bridge_port->bridge_device; - list_for_each_entry(mid, &bridge_device->mids_list, list) { - if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) - mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add); + list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) { + if (add) + mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry, + local_port); + else + mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, + local_port); } } -struct mlxsw_sp_span_respin_work { - struct work_struct work; - struct mlxsw_sp *mlxsw_sp; -}; - -static void mlxsw_sp_span_respin_work(struct work_struct *work) -{ - struct mlxsw_sp_span_respin_work *respin_work = - container_of(work, struct mlxsw_sp_span_respin_work, work); - - rtnl_lock(); - mlxsw_sp_span_respin(respin_work->mlxsw_sp); - rtnl_unlock(); - kfree(respin_work); -} - -static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_span_respin_work *respin_work; - - respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC); - if (!respin_work) - return; - - INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work); - respin_work->mlxsw_sp = mlxsw_sp; - - mlxsw_core_schedule_work(&respin_work->work); -} - -static int mlxsw_sp_port_obj_add(struct net_device *dev, +static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, - struct switchdev_trans *trans, struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -1778,22 +2263,19 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans, - extack); - - if (switchdev_trans_ph_prepare(trans)) { - /* The event is emitted before the changes are actually - * applied to the bridge. Therefore schedule the respin - * call for later, so that the respin logic sees the - * updated bridge state. 
- */ - mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); - } + + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack); + + /* The event is emitted before the changes are actually + * applied to the bridge. Therefore schedule the respin + * call for later, so that the respin logic sees the + * updated bridge state. + */ + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_MDB(obj), - trans); + SWITCHDEV_OBJ_PORT_MDB(obj)); break; default: err = -EOPNOTSUPP; @@ -1809,13 +2291,15 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, { u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid; struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + u16 proto; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return; mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); + br_vlan_get_proto(bridge_port->bridge_device->dev, &proto); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto); mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); } @@ -1826,7 +2310,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *orig_dev = vlan->obj.orig_dev; struct mlxsw_sp_bridge_port *bridge_port; - u16 vid; if (netif_is_bridge_master(orig_dev)) return -EOPNOTSUPP; @@ -1838,34 +2321,11 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_port->bridge_device->vlan_enabled) return 0; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) - mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid); + mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid); return 0; } -static int -__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_bridge_port *bridge_port, - struct mlxsw_sp_mid *mid) -{ - struct net_device *dev = mlxsw_sp_port->dev; - int err; - - if (bridge_port->bridge_device->multicast_enabled && - !bridge_port->mrouter) { - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); - } - - err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); - if (err) - netdev_err(dev, "Unable to remove MC SFD\n"); - - return err; -} - static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_mdb *mdb) { @@ -1875,7 +2335,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_device *bridge_device; struct net_device *dev = mlxsw_sp_port->dev; struct mlxsw_sp_bridge_port *bridge_port; - struct mlxsw_sp_mid *mid; + struct mlxsw_sp_mdb_entry_key key = {}; + struct mlxsw_sp_mdb_entry *mdb_entry; u16 fid_index; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); @@ -1891,36 +2352,48 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); - mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); - if (!mid) { + ether_addr_copy(key.addr, mdb->addr); + key.fid = fid_index; + mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key, + mlxsw_sp_mdb_ht_params); + if (!mdb_entry) { netdev_err(dev, "Unable to remove port from MC DB\n"); return -EINVAL; } - return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); + 
mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry, + mlxsw_sp_port->local_port, false); + return 0; } static void mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_bridge_port *bridge_port) + struct mlxsw_sp_bridge_port *bridge_port, + u16 fid_index) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_bridge_device *bridge_device; - struct mlxsw_sp_mid *mid, *tmp; + struct mlxsw_sp_mdb_entry *mdb_entry, *tmp; + u16 local_port = mlxsw_sp_port->local_port; bridge_device = bridge_port->bridge_device; - list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { - if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) { - __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, - mid); - } else if (bridge_device->multicast_enabled && - bridge_port->mrouter) { - mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); - } + list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list, + list) { + if (mdb_entry->key.fid != fid_index) + continue; + + if (bridge_port->mrouter) + mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, + mdb_entry, + local_port); + + mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry, + local_port, true); } } -static int mlxsw_sp_port_obj_del(struct net_device *dev, +static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -1940,7 +2413,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, break; } - mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); return err; } @@ -1962,16 +2435,37 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, return NULL; } -static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { - .switchdev_port_attr_get = mlxsw_sp_port_attr_get, - .switchdev_port_attr_set = mlxsw_sp_port_attr_set, -}; +static int +mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = { + .brport_dev = bridge_port->dev, + .mlxsw_sp_port = mlxsw_sp_port, + }; + struct notifier_block *nb; + int err; + + nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb; + err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev, + &rso, NULL, nb, extack); + if (err) + goto err_replay; + + return 0; + +err_replay: + nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb; + switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev, + &rso, NULL, nb, extack); + return err; +} static int -mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, - struct mlxsw_sp_bridge_port *bridge_port, - struct mlxsw_sp_port *mlxsw_sp_port, - struct netlink_ext_ack *extack) +mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) { if (is_vlan_dev(bridge_port->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); @@ -1982,7 +2476,25 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, if (mlxsw_sp_port->default_vlan->fid) mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); - return 0; + return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack); +} + +static int +mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct 
mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port, + extack); +} + +static void +mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + /* Make sure untagged frames are allowed to ingress */ + mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, + ETH_P_8021Q); } static void @@ -1990,14 +2502,14 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port) { - /* Make sure untagged frames are allowed to ingress */ - mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); + mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port); } static int -mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev, u16 vid, - struct netlink_ext_ack *extack) +mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, + u16 vid, u16 ethertype, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); @@ -2005,6 +2517,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, .type = MLXSW_SP_NVE_TYPE_VXLAN, .vni = vxlan->cfg.vni, .dev = vxlan_dev, + .ethertype = ethertype, }; struct mlxsw_sp_fid *fid; int err; @@ -2019,14 +2532,14 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, return err; } - /* If no other port is member in the VLAN, then the FID does not exist. - * NVE will be enabled on the FID once a port joins the VLAN - */ - fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid); - if (!fid) - return 0; + fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); + if (IS_ERR(fid)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID"); + return PTR_ERR(fid); + } if (mlxsw_sp_fid_vni_is_set(fid)) { + NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID"); err = -EINVAL; goto err_vni_exists; } @@ -2035,11 +2548,6 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, if (err) goto err_nve_fid_enable; - /* The tunnel port does not hold a reference on the FID. Only - * local ports and the router port - */ - mlxsw_sp_fid_put(fid); - return 0; err_nve_fid_enable: @@ -2048,6 +2556,15 @@ err_vni_exists: return err; } +static int +mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev, + vid, ETH_P_8021Q, extack); +} + static struct net_device * mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid) { @@ -2076,38 +2593,8 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device, u16 vid, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); - struct net_device *vxlan_dev; - struct mlxsw_sp_fid *fid; - int err; - - fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); - if (IS_ERR(fid)) - return fid; - - if (mlxsw_sp_fid_vni_is_set(fid)) - return fid; - - /* Find the VxLAN device that has the specified VLAN configured as - * PVID and egress untagged. 
There can be at most one such device - */ - vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, - vid); - if (!vxlan_dev) - return fid; - - if (!netif_running(vxlan_dev)) - return fid; - - err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, - extack); - if (err) - goto err_vxlan_join; - return fid; - -err_vxlan_join: - mlxsw_sp_fid_put(fid); - return ERR_PTR(err); + return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); } static struct mlxsw_sp_fid * @@ -2161,6 +2648,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; struct net_device *dev = bridge_port->dev; u16 vid; + int err; vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); @@ -2176,8 +2664,20 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, if (mlxsw_sp_port_vlan->fid) mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); - return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, - extack); + err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, + extack); + if (err) + return err; + + err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack); + if (err) + goto err_replay; + + return 0; + +err_replay: + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); + return err; } static void @@ -2208,15 +2708,19 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, .type = MLXSW_SP_NVE_TYPE_VXLAN, .vni = vxlan->cfg.vni, .dev = vxlan_dev, + .ethertype = ETH_P_8021Q, }; struct mlxsw_sp_fid *fid; int err; - fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); - if (!fid) + fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); + if (IS_ERR(fid)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID"); return -EINVAL; + } if (mlxsw_sp_fid_vni_is_set(fid)) { + NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID"); err = -EINVAL; goto err_vni_exists; } @@ -2225,11 +2729,6 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, if (err) goto err_nve_fid_enable; - /* The tunnel port does not hold a reference on the FID. 
Only - * local ports and the router port - */ - mlxsw_sp_fid_put(fid); - return 0; err_nve_fid_enable: @@ -2243,34 +2742,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device, u16 vid, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); - struct net_device *vxlan_dev; - struct mlxsw_sp_fid *fid; - int err; - - fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); - if (IS_ERR(fid)) - return fid; - if (mlxsw_sp_fid_vni_is_set(fid)) - return fid; - - vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev); - if (!vxlan_dev) - return fid; - - if (!netif_running(vxlan_dev)) - return fid; - - err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0, - extack); - if (err) - goto err_vxlan_join; - - return fid; - -err_vxlan_join: - mlxsw_sp_fid_put(fid); - return ERR_PTR(err); + return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex); } static struct mlxsw_sp_fid * @@ -2302,6 +2775,104 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { .fid_vid = mlxsw_sp_bridge_8021d_fid_vid, }; +static int +mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false); + if (err) + return err; + + err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port, + extack); + if (err) + goto err_bridge_vlan_aware_port_join; + + return 0; + +err_bridge_vlan_aware_port_join: + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); + return err; +} + +static void +mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port); + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); +} + +static int +mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev, + vid, ETH_P_8021AD, extack); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = { + .port_join = mlxsw_sp_bridge_8021ad_port_join, + .port_leave = mlxsw_sp_bridge_8021ad_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, + .fid_get = mlxsw_sp_bridge_8021q_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, +}; + +static int +mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + int err; + + /* The EtherType of decapsulated packets is determined at the egress + * port to allow 802.1d and 802.1ad bridges with VXLAN devices to + * co-exist. 
+ */ + err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD); + if (err) + return err; + + err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port, + mlxsw_sp_port, extack); + if (err) + goto err_bridge_8021ad_port_join; + + return 0; + +err_bridge_8021ad_port_join: + mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q); + return err; +} + +static void +mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port, + mlxsw_sp_port); + mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = { + .port_join = mlxsw_sp2_bridge_8021ad_port_join, + .port_leave = mlxsw_sp2_bridge_8021ad_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, + .fid_get = mlxsw_sp_bridge_8021q_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, +}; + int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev, @@ -2312,7 +2883,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; int err; - bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev); + bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev, + extack); if (IS_ERR(bridge_port)) return PTR_ERR(bridge_port); bridge_device = bridge_port->bridge_device; @@ -2322,8 +2894,15 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_port_join; + err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack); + if (err) + goto err_replay; + return 0; +err_replay: + bridge_device->ops->port_leave(bridge_device, bridge_port, + mlxsw_sp_port); err_port_join: mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); return err; @@ -2346,26 +2925,12 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, bridge_device->ops->port_leave(bridge_device, bridge_port, mlxsw_sp_port); + mlxsw_sp_port_security_set(mlxsw_sp_port, false); mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); } -int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, - const struct net_device *vxlan_dev, u16 vid, - struct netlink_ext_ack *extack) -{ - struct mlxsw_sp_bridge_device *bridge_device; - - bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); - if (WARN_ON(!bridge_device)) - return -EINVAL; - - return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, - extack); -} - -void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, - const struct net_device *vxlan_dev) +static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, + const struct net_device *vxlan_dev) { struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); struct mlxsw_sp_fid *fid; @@ -2376,21 +2941,52 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, return; mlxsw_sp_nve_fid_disable(mlxsw_sp, fid); + /* Drop both the reference we just took during lookup and the reference + * the VXLAN device took. 
+ */ + mlxsw_sp_fid_put(fid); mlxsw_sp_fid_put(fid); } -struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, - u16 vid, - struct netlink_ext_ack *extack) +int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) { struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_port *mlxsw_sp_port; + int err; bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); if (WARN_ON(!bridge_device)) - return ERR_PTR(-EINVAL); + return -EINVAL; + + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev); + if (!mlxsw_sp_port) + return -EINVAL; + + err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, + extack); + if (err) + return err; + + err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev, + NULL, NULL, NULL, false, extack); + if (err) + goto err_bridge_port_offload; - return bridge_device->ops->fid_get(bridge_device, vid, extack); + return 0; + +err_bridge_port_offload: + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + return err; +} + +void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, + struct net_device *vxlan_dev) +{ + switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); } static void @@ -2461,13 +3057,14 @@ static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev, static void mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type, const char *mac, u16 vid, - struct net_device *dev, bool offloaded) + struct net_device *dev, bool offloaded, bool locked) { - struct switchdev_notifier_fdb_info info; + struct switchdev_notifier_fdb_info info = {}; info.addr = mac; info.vid = vid; info.offloaded = offloaded; + info.locked = locked; call_switchdev_notifiers(type, dev, &info.info, NULL); } @@ -2479,14 +3076,16 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_device *bridge_device; struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_port *mlxsw_sp_port; + u16 local_port, vid, fid, evid = 0; enum switchdev_notifier_type type; char mac[ETH_ALEN]; - u8 local_port; - u16 vid, fid; bool do_notification = true; int err; mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port); + + if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) + return; mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) { dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); @@ -2507,9 +3106,16 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, bridge_device = bridge_port->bridge_device; vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; + evid = mlxsw_sp_port_vlan->vid; + + if (adding && mlxsw_sp_port->security) { + mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac, + vid, bridge_port->dev, false, true); + return; + } do_fdb_op: - err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, + err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid, adding, true); if (err) { dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); @@ -2519,7 +3125,8 @@ do_fdb_op: if (!do_notification) return; type = adding ? 
SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; - mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); + mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding, + false); return; @@ -2566,8 +3173,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, bridge_device = bridge_port->bridge_device; vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; - lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ? - mlxsw_sp_port_vlan->vid : 0; + lag_vid = mlxsw_sp_port_vlan->vid; + + if (adding && mlxsw_sp_port->security) { + mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac, + vid, bridge_port->dev, false, true); + return; + } do_fdb_op: err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, @@ -2580,7 +3192,8 @@ do_fdb_op: if (!do_notification) return; type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; - mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding); + mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding, + false); return; @@ -2610,7 +3223,7 @@ __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp, if (err) return err; - dev = __dev_get_by_index(&init_net, nve_ifindex); + dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex); if (!dev) return -EINVAL; *nve_dev = dev; @@ -2687,7 +3300,7 @@ static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp, type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE; - mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding); + mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding, false); mlxsw_sp_fid_put(fid); @@ -2737,19 +3350,15 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, } } -static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; - - mlxsw_core_schedule_dw(&bridge->fdb_notify.dw, - msecs_to_jiffies(bridge->fdb_notify.interval)); -} +#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10 static void mlxsw_sp_fdb_notify_work(struct work_struct *work) { struct mlxsw_sp_bridge *bridge; struct mlxsw_sp *mlxsw_sp; + bool reschedule = false; char *sfn_pl; + int queries; u8 num_rec; int i; int err; @@ -2762,24 +3371,36 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = bridge->mlxsw_sp; rtnl_lock(); - mlxsw_reg_sfn_pack(sfn_pl); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); - if (err) { - dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); + if (list_empty(&bridge->bridges_list)) goto out; + reschedule = true; + queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION; + while (queries > 0) { + mlxsw_reg_sfn_pack(sfn_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); + if (err) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); + goto out; + } + num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); + for (i = 0; i < num_rec; i++) + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); + if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT) + goto out; + queries--; } - num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); - for (i = 0; i < num_rec; i++) - mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); out: rtnl_unlock(); kfree(sfn_pl); - mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); + if (!reschedule) + return; + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries); } struct mlxsw_sp_switchdev_event_work { struct work_struct 
work; + netdevice_tracker dev_tracker; union { struct switchdev_notifier_fdb_info fdb_info; struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info; @@ -2822,7 +3443,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp, &vxlan_fdb_info.info, NULL); mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, vxlan_fdb_info.eth_addr, - fdb_info->vid, dev, true); + fdb_info->vid, dev, true, false); break; case SWITCHDEV_FDB_DEL_TO_DEVICE: err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, @@ -2854,7 +3475,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work * return; if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE && - !switchdev_work->fdb_info.added_by_user) + (!switchdev_work->fdb_info.added_by_user || + switchdev_work->fdb_info.is_local)) return; if (!netif_running(dev)) @@ -2909,20 +3531,20 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; - if (!fdb_info->added_by_user) + if (!fdb_info->added_by_user || fdb_info->is_local) break; err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); if (err) break; mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, fdb_info->addr, - fdb_info->vid, dev, true); + fdb_info->vid, dev, true, false); break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; - case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE: /* These events are only used to potentially update an existing * SPAN mirror. @@ -2935,8 +3557,8 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) out: rtnl_unlock(); kfree(switchdev_work->fdb_info.addr); + netdev_put(dev, &switchdev_work->dev_tracker); kfree(switchdev_work); - dev_put(dev); } static void @@ -2947,7 +3569,6 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; struct mlxsw_sp_bridge_device *bridge_device; struct net_device *dev = switchdev_work->dev; - u8 all_zeros_mac[ETH_ALEN] = { 0 }; enum mlxsw_sp_l3proto proto; union mlxsw_sp_l3addr addr; struct net_device *br_dev; @@ -2969,7 +3590,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, &proto, &addr); - if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { + if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) { err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr); if (err) { mlxsw_sp_fid_put(fid); @@ -3000,7 +3621,8 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, &vxlan_fdb_info->info, NULL); mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, - vxlan_fdb_info->eth_addr, vid, dev, true); + vxlan_fdb_info->eth_addr, vid, dev, true, + false); mlxsw_sp_fid_put(fid); @@ -3020,13 +3642,14 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_device *bridge_device; struct net_device *dev = switchdev_work->dev; struct net_device *br_dev = netdev_master_upper_dev_get(dev); - u8 all_zeros_mac[ETH_ALEN] = { 0 }; enum mlxsw_sp_l3proto proto; union mlxsw_sp_l3addr addr; struct mlxsw_sp_fid *fid; u16 vid; vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; + if (!vxlan_fdb_info->offloaded) + return; bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, 
br_dev); if (!bridge_device) @@ -3039,7 +3662,7 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip, &proto, &addr); - if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) { + if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) { mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr); mlxsw_sp_fid_put(fid); return; @@ -3050,7 +3673,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, false, false); vid = bridge_device->ops->fid_vid(bridge_device, fid); mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, - vxlan_fdb_info->eth_addr, vid, dev, false); + vxlan_fdb_info->eth_addr, vid, dev, false, + false); mlxsw_sp_fid_put(fid); } @@ -3087,8 +3711,8 @@ static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work) out: rtnl_unlock(); + netdev_put(dev, &switchdev_work->dev_tracker); kfree(switchdev_work); - dev_put(dev); } static int @@ -3144,6 +3768,13 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, struct net_device *br_dev; int err; + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(dev, ptr, + mlxsw_sp_port_dev_check, + mlxsw_sp_port_attr_set); + return notifier_from_errno(err); + } + /* Tunnel devices are not our uppers, so check their master instead */ br_dev = netdev_master_upper_dev_get_rcu(dev); if (!br_dev) @@ -3161,9 +3792,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, switchdev_work->event = event; switch (event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + case SWITCHDEV_FDB_ADD_TO_BRIDGE: case SWITCHDEV_FDB_DEL_TO_BRIDGE: fdb_info = container_of(info, struct switchdev_notifier_fdb_info, @@ -3181,9 +3812,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, * upper device containig mlxsw_sp_port or just a * mlxsw_sp_port */ - dev_hold(dev); + netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC); break; - case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE: INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_vxlan_fdb_event_work); @@ -3191,7 +3822,7 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, info); if (err) goto err_vxlan_work_prepare; - dev_hold(dev); + netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC); break; default: kfree(switchdev_work); @@ -3231,8 +3862,10 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, * the lookup function to return 'vxlan_dev' */ if (flag_untagged && flag_pvid && - mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) + mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) { + NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI"); return -EINVAL; + } if (!netif_running(vxlan_dev)) return 0; @@ -3245,8 +3878,8 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, if (!fid) { if (!flag_untagged || !flag_pvid) return 0; - return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, - vxlan_dev, vid, extack); + return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, + vid, extack); } /* Second case: FID is associated with the VNI and the VLAN associated @@ -3260,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fid_put(fid); return -EINVAL; } - 
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); mlxsw_sp_fid_put(fid); return 0; } @@ -3276,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, /* Fourth case: Thew new VLAN is PVID, which means the VLAN currently * mapped to the VNI should be unmapped */ - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); mlxsw_sp_fid_put(fid); /* Fifth case: The new VLAN is also egress untagged, which means the @@ -3285,16 +3918,14 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, if (!flag_untagged) return 0; - err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, - extack); + err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack); if (err) goto err_vxlan_join; return 0; err_vxlan_join: - mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid, - NULL); + bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL); return err; } @@ -3318,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_fid_8021q_vid(fid) != vid) goto out; - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); out: mlxsw_sp_fid_put(fid); @@ -3333,12 +3964,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj); bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - struct switchdev_trans *trans = port_obj_info->trans; struct mlxsw_sp_bridge_device *bridge_device; struct netlink_ext_ack *extack; struct mlxsw_sp *mlxsw_sp; struct net_device *br_dev; - u16 vid; extack = switchdev_notifier_info_to_extack(&port_obj_info->info); br_dev = netdev_master_upper_dev_get(vxlan_dev); @@ -3351,9 +3980,6 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, port_obj_info->handled = true; - if (switchdev_trans_ph_commit(trans)) - return 0; - bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); if (!bridge_device) return -EINVAL; @@ -3361,18 +3987,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, if (!bridge_device->vlan_enabled) return 0; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - int err; - - err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, - vxlan_dev, vid, - flag_untagged, - flag_pvid, extack); - if (err) - return err; - } - - return 0; + return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, + vxlan_dev, vlan->vid, + flag_untagged, + flag_pvid, extack); } static void @@ -3385,7 +4003,6 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev, struct mlxsw_sp_bridge_device *bridge_device; struct mlxsw_sp *mlxsw_sp; struct net_device *br_dev; - u16 vid; br_dev = netdev_master_upper_dev_get(vxlan_dev); if (!br_dev) @@ -3404,9 +4021,8 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev, if (!bridge_device->vlan_enabled) return; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) - mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, - vxlan_dev, vid); + mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev, + vlan->vid); } static int @@ -3465,6 +4081,11 @@ static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused, mlxsw_sp_port_dev_check, mlxsw_sp_port_obj_del); return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + 
mlxsw_sp_port_dev_check, + mlxsw_sp_port_attr_set); + return notifier_from_errno(err); } return NOTIFY_DONE; @@ -3507,7 +4128,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; - mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); return 0; err_register_switchdev_blocking_notifier: @@ -3527,6 +4147,24 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); } +static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops; +} + +const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = { + .init = mlxsw_sp1_switchdev_init, +}; + +static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops; +} + +const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = { + .init = mlxsw_sp2_switchdev_init, +}; + int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge; @@ -3542,6 +4180,8 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; + mlxsw_sp->switchdev_ops->init(mlxsw_sp); + return mlxsw_sp_fdb_init(mlxsw_sp); } @@ -3552,11 +4192,3 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->bridge); } -void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; -} - -void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) -{ -} |
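
The refactor in the hunks above replaces the old per-MID port bitmap with reference-counted mlxsw_sp_mdb_entry_port objects hanging off each mlxsw_sp_mdb_entry, so whether an MDB entry may be torn down now depends on the remaining per-port references (with an mrouter port holding one extra reference) rather than on an empty bitmap. Below is a minimal, self-contained userspace sketch of that removal decision only; it is not part of the patch, the example_* names are invented for illustration, and the real driver uses refcount_t, rhashtable and PGT registers instead of the plain C shown here.

/* Illustrative userspace sketch -- NOT part of the patch above. */
#include <stdbool.h>
#include <stdio.h>

struct example_mdb_port {
	unsigned int refcount;	/* how many MDB adds reference this port */
	bool mrouter;		/* port is also referenced as a multicast router */
};

struct example_mdb_entry {
	unsigned int ports_count;	/* ports currently linked to the entry */
};

/* Mirrors the decision made in mlxsw_sp_mc_mdb_entry_remove(): the entry may
 * only be destroyed when the port being removed is the last one and, unless a
 * forced flush is in progress, that port holds no further references (an
 * mrouter port always carries one extra reference).
 */
static bool example_mdb_entry_removable(const struct example_mdb_entry *entry,
					const struct example_mdb_port *port,
					bool force)
{
	if (entry->ports_count > 1)
		return false;
	if (force)
		return true;
	if (!port->mrouter && port->refcount > 1)
		return false;
	if (port->mrouter && port->refcount > 2)
		return false;
	return true;
}

int main(void)
{
	struct example_mdb_entry entry = { .ports_count = 1 };
	struct example_mdb_port port = { .refcount = 2, .mrouter = true };

	/* Last port; the extra reference is only the mrouter one, so the
	 * entry can be freed together with the port.
	 */
	printf("removable: %d\n",
	       example_mdb_entry_removable(&entry, &port, false));

	/* A second MDB add on the same port keeps the entry alive. */
	port.refcount = 3;
	printf("removable: %d\n",
	       example_mdb_entry_removable(&entry, &port, false));
	return 0;
}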