Diffstat (limited to 'drivers/net/dsa/sja1105/sja1105_main.c')
 -rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 2583
 1 file changed, 1089 insertions(+), 1494 deletions(-)
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 4f0545605f6b..aa2145cf29a6 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -15,30 +15,41 @@ #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> -#include <linux/of_device.h> -#include <linux/pcs/pcs-xpcs.h> #include <linux/netdev_features.h> #include <linux/netdevice.h> #include <linux/if_bridge.h> #include <linux/if_ether.h> #include <linux/dsa/8021q.h> +#include <linux/units.h> + #include "sja1105.h" #include "sja1105_tas.h" #define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull -#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1) - -static const struct dsa_switch_ops sja1105_switch_ops; -static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, - unsigned int startup_delay) +/* Configure the optional reset pin and bring up switch */ +static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len, + unsigned int startup_delay) { + struct gpio_desc *gpio; + + gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + if (!gpio) + return 0; + gpiod_set_value_cansleep(gpio, 1); /* Wait for minimum reset pulse length */ msleep(pulse_len); gpiod_set_value_cansleep(gpio, 0); /* Wait until chip is ready after reset */ msleep(startup_delay); + + gpiod_put(gpio); + + return 0; } static void @@ -57,6 +68,94 @@ static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd, return !!(l2_fwd[from].reach_port & BIT(to)); } +static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) +{ + struct sja1105_vlan_lookup_entry *vlan; + int count, i; + + vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; + count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; + + for (i = 0; i < count; i++) + if (vlan[i].vlanid == vid) + return i; + + /* Return an invalid entry index if not found */ + return -1; +} + +static int sja1105_drop_untagged(struct dsa_switch *ds, int port, bool drop) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_mac_config_entry *mac; + + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; + + if (mac[port].drpuntag == drop) + return 0; + + mac[port].drpuntag = drop; + + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, + &mac[port], true); +} + +static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) +{ + struct sja1105_mac_config_entry *mac; + + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; + + if (mac[port].vlanid == pvid) + return 0; + + mac[port].vlanid = pvid; + + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, + &mac[port], true); +} + +static int sja1105_commit_pvid(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *br = dsa_port_bridge_dev_get(dp); + struct sja1105_private *priv = ds->priv; + struct sja1105_vlan_lookup_entry *vlan; + bool drop_untagged = false; + int match, rc; + u16 pvid; + + if (br && br_vlan_enabled(br)) + pvid = priv->bridge_pvid[port]; + else + pvid = priv->tag_8021q_pvid[port]; + + rc = sja1105_pvid_apply(priv, port, pvid); + if (rc) + return rc; + + /* Only force dropping of untagged packets when the port is under a + * VLAN-aware bridge. When the tag_8021q pvid is used, we are + * deliberately removing the RX VLAN from the port's VMEMB_PORT list, + * to prevent DSA tag spoofing from the link partner. 
Untagged packets + * are the only ones that should be received with tag_8021q, so + * definitely don't drop them. + */ + if (pvid == priv->bridge_pvid[port]) { + vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; + + match = sja1105_is_vlan_configured(priv, pvid); + + if (match < 0 || !(vlan[match].vmemb_port & BIT(port))) + drop_untagged = true; + } + + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) + drop_untagged = true; + + return sja1105_drop_untagged(ds, port, drop_untagged); +} + static int sja1105_init_mac_settings(struct sja1105_private *priv) { struct sja1105_mac_config_entry default_mac = { @@ -101,7 +200,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) struct sja1105_mac_config_entry *mac; struct dsa_switch *ds = priv->ds; struct sja1105_table *table; - int i; + struct dsa_port *dp; table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG]; @@ -120,16 +219,27 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) mac = table->entries; - for (i = 0; i < ds->num_ports; i++) { - mac[i] = default_mac; - if (i == dsa_upstream_port(priv->ds, i)) { - /* STP doesn't get called for CPU port, so we need to - * set the I/O parameters statically. - */ - mac[i].dyn_learn = true; - mac[i].ingress = true; - mac[i].egress = true; - } + list_for_each_entry(dp, &ds->dst->ports, list) { + if (dp->ds != ds) + continue; + + mac[dp->index] = default_mac; + + /* Let sja1105_bridge_stp_state_set() keep address learning + * enabled for the DSA ports. CPU ports use software-assisted + * learning to ensure that only FDB entries belonging to the + * bridge are learned, and that they are learned towards all + * CPU ports in a cross-chip topology if multiple CPU ports + * exist. + */ + if (dsa_port_is_dsa(dp)) + dp->learning = true; + + /* Disallow untagged packets from being received on the + * CPU and DSA ports. + */ + if (dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)) + mac[dp->index].drpuntag = true; } return 0; @@ -283,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) .start_dynspc = 0, /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */ .poly = 0x97, - /* This selects between Independent VLAN Learning (IVL) and - * Shared VLAN Learning (SVL) - */ - .shared_learn = true, + /* Always use Independent VLAN Learning (IVL) */ + .shared_learn = false, /* Don't discard management traffic based on ENFPORT - * we don't perform SMAC port enforcement anyway, so * what we are setting here doesn't matter. 
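For reference, the pvid selection and untagged-drop decision that sja1105_commit_pvid() makes in the hunk above can be modelled as a small pure function. This is only an illustrative sketch with made-up types (example_pvid_ctx and its fields are not the driver's structures); it assumes untagged traffic is dropped on CPU/DSA ports and, under a VLAN-aware bridge, whenever the active pvid is not installed in the port's VMEMB_PORT membership.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified inputs for one port; not the driver's types. */
struct example_pvid_ctx {
	bool     vlan_aware_bridge; /* port is under a vlan_filtering=1 bridge */
	bool     cpu_or_dsa;        /* CPU port or DSA link */
	uint16_t bridge_pvid;
	uint16_t tag_8021q_pvid;
	bool     pvid_in_vmemb;     /* is the bridge pvid in the port's VMEMB_PORT? */
};

/* Models the decision taken by sja1105_commit_pvid(): choose the active
 * pvid, then decide whether DRPUNTAG should be set for the port.
 */
static bool example_drop_untagged(const struct example_pvid_ctx *c, uint16_t *pvid)
{
	*pvid = c->vlan_aware_bridge ? c->bridge_pvid : c->tag_8021q_pvid;

	/* CPU and DSA ports never accept untagged traffic. */
	if (c->cpu_or_dsa)
		return true;

	/* Under a VLAN-aware bridge, drop untagged traffic unless the pvid
	 * VLAN is actually installed on this port. With the tag_8021q pvid,
	 * untagged packets are the expected case, so keep them.
	 */
	return c->vlan_aware_bridge && !c->pvid_in_vmemb;
}

int main(void)
{
	struct example_pvid_ctx c = {
		.vlan_aware_bridge = true,
		.bridge_pvid = 1,
		.tag_8021q_pvid = 1024,
		.pvid_in_vmemb = false,	/* VLAN 1 not yet added on the port */
	};
	uint16_t pvid;
	bool drop = example_drop_untagged(&c, &pvid);

	printf("active pvid %u, drop untagged: %d\n", pvid, drop);
	return 0;
}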
@@ -380,8 +488,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) table->entry_count = 1; for (port = 0; port < ds->num_ports; port++) { - struct sja1105_bridge_vlan *v; - if (dsa_is_unused_port(ds, port)) continue; @@ -389,16 +495,10 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) pvid.vlan_bc |= BIT(port); pvid.tag_port &= ~BIT(port); - v = kzalloc(sizeof(*v), GFP_KERNEL); - if (!v) - return -ENOMEM; - - v->port = port; - v->vid = SJA1105_DEFAULT_VLAN; - v->untagged = true; - if (dsa_is_cpu_port(ds, port)) - v->pvid = true; - list_add(&v->list, &priv->dsa_8021q_vlans); + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + priv->tag_8021q_pvid[port] = SJA1105_DEFAULT_VLAN; + priv->bridge_pvid[port] = SJA1105_DEFAULT_VLAN; + } } ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; @@ -409,8 +509,11 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv) { struct sja1105_l2_forwarding_entry *l2fwd; struct dsa_switch *ds = priv->ds; + struct dsa_switch_tree *dst; struct sja1105_table *table; - int i, j; + struct dsa_link *dl; + int port, tc; + int from, to; table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING]; @@ -428,47 +531,109 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv) l2fwd = table->entries; - /* First 5 entries define the forwarding rules */ - for (i = 0; i < ds->num_ports; i++) { - unsigned int upstream = dsa_upstream_port(priv->ds, i); + /* First 5 entries in the L2 Forwarding Table define the forwarding + * rules and the VLAN PCP to ingress queue mapping. + * Set up the ingress queue mapping first. + */ + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) + continue; - if (dsa_is_unused_port(ds, i)) + for (tc = 0; tc < SJA1105_NUM_TC; tc++) + l2fwd[port].vlan_pmap[tc] = tc; + } + + /* Then manage the forwarding domain for user ports. These can forward + * only to the always-on domain (CPU port and DSA links) + */ + for (from = 0; from < ds->num_ports; from++) { + if (!dsa_is_user_port(ds, from)) continue; - for (j = 0; j < SJA1105_NUM_TC; j++) - l2fwd[i].vlan_pmap[j] = j; + for (to = 0; to < ds->num_ports; to++) { + if (!dsa_is_cpu_port(ds, to) && + !dsa_is_dsa_port(ds, to)) + continue; - /* All ports start up with egress flooding enabled, - * including the CPU port. - */ - priv->ucast_egress_floods |= BIT(i); - priv->bcast_egress_floods |= BIT(i); + l2fwd[from].bc_domain |= BIT(to); + l2fwd[from].fl_domain |= BIT(to); + + sja1105_port_allow_traffic(l2fwd, from, to, true); + } + } - if (i == upstream) + /* Then manage the forwarding domain for DSA links and CPU ports (the + * always-on domain). These can send packets to any enabled port except + * themselves. 
+ */ + for (from = 0; from < ds->num_ports; from++) { + if (!dsa_is_cpu_port(ds, from) && !dsa_is_dsa_port(ds, from)) continue; - sja1105_port_allow_traffic(l2fwd, i, upstream, true); - sja1105_port_allow_traffic(l2fwd, upstream, i, true); + for (to = 0; to < ds->num_ports; to++) { + if (dsa_is_unused_port(ds, to)) + continue; - l2fwd[i].bc_domain = BIT(upstream); - l2fwd[i].fl_domain = BIT(upstream); + if (from == to) + continue; - l2fwd[upstream].bc_domain |= BIT(i); - l2fwd[upstream].fl_domain |= BIT(i); + l2fwd[from].bc_domain |= BIT(to); + l2fwd[from].fl_domain |= BIT(to); + + sja1105_port_allow_traffic(l2fwd, from, to, true); + } + } + + /* In odd topologies ("H" connections where there is a DSA link to + * another switch which also has its own CPU port), TX packets can loop + * back into the system (they are flooded from CPU port 1 to the DSA + * link, and from there to CPU port 2). Prevent this from happening by + * cutting RX from DSA links towards our CPU port, if the remote switch + * has its own CPU port and therefore doesn't need ours for network + * stack termination. + */ + dst = ds->dst; + + list_for_each_entry(dl, &dst->rtable, list) { + if (dl->dp->ds != ds || dl->link_dp->cpu_dp == dl->dp->cpu_dp) + continue; + + from = dl->dp->index; + to = dsa_upstream_port(ds, from); + + dev_warn(ds->dev, + "H topology detected, cutting RX from DSA link %d to CPU port %d to prevent TX packet loops\n", + from, to); + + sja1105_port_allow_traffic(l2fwd, from, to, false); + + l2fwd[from].bc_domain &= ~BIT(to); + l2fwd[from].fl_domain &= ~BIT(to); + } + + /* Finally, manage the egress flooding domain. All ports start up with + * flooding enabled, including the CPU port and DSA links. + */ + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) + continue; + + priv->ucast_egress_floods |= BIT(port); + priv->bcast_egress_floods |= BIT(port); } /* Next 8 entries define VLAN PCP mapping from ingress to egress. * Create a one-to-one mapping. */ - for (i = 0; i < SJA1105_NUM_TC; i++) { - for (j = 0; j < ds->num_ports; j++) { - if (dsa_is_unused_port(ds, j)) + for (tc = 0; tc < SJA1105_NUM_TC; tc++) { + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) continue; - l2fwd[ds->num_ports + i].vlan_pmap[j] = i; + l2fwd[ds->num_ports + tc].vlan_pmap[port] = tc; } - l2fwd[ds->num_ports + i].type_egrpcp2outputq = true; + l2fwd[ds->num_ports + tc].type_egrpcp2outputq = true; } return 0; @@ -547,18 +712,11 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv) { struct sja1105_l2_forwarding_params_entry *l2_fwd_params; struct sja1105_vl_forwarding_params_entry *vl_fwd_params; - int max_mem = priv->info->max_frame_mem; struct sja1105_table *table; - /* VLAN retagging is implemented using a loopback port that consumes - * frame buffers. That leaves less for us. - */ - if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT) - max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD; - table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS]; l2_fwd_params = table->entries; - l2_fwd_params->part_spc[0] = max_mem; + l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY; /* If we have any critical-traffic virtual links, we need to reserve * some frame buffer memory for them. 
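The forwarding-domain rules built up in the hunks above (user ports may only reach the always-on domain of CPU and DSA ports, the always-on domain reaches every enabled port except itself, and in "H" topologies RX from a DSA link towards our CPU port is additionally cut) can be condensed into a short host-side model. This is only an illustrative sketch over an assumed 5-port layout, not driver code.

#include <stdint.h>
#include <stdio.h>

enum example_port_role { UNUSED, USER, CPU_PORT, DSA_LINK };

#define EXAMPLE_NUM_PORTS 5

/* Compute per-port reachability masks following the same rules as
 * sja1105_init_l2_forwarding(): user ports reach only the always-on
 * domain (CPU + DSA links); CPU/DSA ports reach everything but themselves.
 */
static void example_build_reach(const enum example_port_role *role,
				uint32_t *reach)
{
	int from, to;

	for (from = 0; from < EXAMPLE_NUM_PORTS; from++) {
		reach[from] = 0;

		if (role[from] == UNUSED)
			continue;

		for (to = 0; to < EXAMPLE_NUM_PORTS; to++) {
			if (to == from || role[to] == UNUSED)
				continue;

			if (role[from] == USER &&
			    (role[to] == CPU_PORT || role[to] == DSA_LINK))
				reach[from] |= 1U << to;

			if (role[from] == CPU_PORT || role[from] == DSA_LINK)
				reach[from] |= 1U << to;
		}
	}
}

int main(void)
{
	/* Assumed example: port 4 is the CPU port, port 3 a DSA link. */
	enum example_port_role role[EXAMPLE_NUM_PORTS] = {
		USER, USER, USER, DSA_LINK, CPU_PORT,
	};
	uint32_t reach[EXAMPLE_NUM_PORTS];
	int port;

	example_build_reach(role, reach);

	/* In an "H" topology, RX from the DSA link (3) towards our CPU
	 * port (4) would additionally be cut to avoid TX packet loops:
	 * reach[3] &= ~(1U << 4);
	 */
	for (port = 0; port < EXAMPLE_NUM_PORTS; port++)
		printf("port %d reaches 0x%02x\n", port, reach[port]);

	return 0;
}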
At the moment, hardcode the value @@ -630,6 +788,72 @@ static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv) general_params->tdmaconfigidx = tdmaconfigidx; } +static int sja1105_init_topology(struct sja1105_private *priv, + struct sja1105_general_params_entry *general_params) +{ + struct dsa_switch *ds = priv->ds; + int port; + + /* The host port is the destination for traffic matching mac_fltres1 + * and mac_fltres0 on all ports except itself. Default to an invalid + * value. + */ + general_params->host_port = ds->num_ports; + + /* Link-local traffic received on casc_port will be forwarded + * to host_port without embedding the source port and device ID + * info in the destination MAC address, and no RX timestamps will be + * taken either (presumably because it is a cascaded port and a + * downstream SJA switch already did that). + * To disable the feature, we need to do different things depending on + * switch generation. On SJA1105 we need to set an invalid port, while + * on SJA1110 which support multiple cascaded ports, this field is a + * bitmask so it must be left zero. + */ + if (!priv->info->multiple_cascade_ports) + general_params->casc_port = ds->num_ports; + + for (port = 0; port < ds->num_ports; port++) { + bool is_upstream = dsa_is_upstream_port(ds, port); + bool is_dsa_link = dsa_is_dsa_port(ds, port); + + /* Upstream ports can be dedicated CPU ports or + * upstream-facing DSA links + */ + if (is_upstream) { + if (general_params->host_port == ds->num_ports) { + general_params->host_port = port; + } else { + dev_err(ds->dev, + "Port %llu is already a host port, configuring %d as one too is not supported\n", + general_params->host_port, port); + return -EINVAL; + } + } + + /* Cascade ports are downstream-facing DSA links */ + if (is_dsa_link && !is_upstream) { + if (priv->info->multiple_cascade_ports) { + general_params->casc_port |= BIT(port); + } else if (general_params->casc_port == ds->num_ports) { + general_params->casc_port = port; + } else { + dev_err(ds->dev, + "Port %llu is already a cascade port, configuring %d as one too is not supported\n", + general_params->casc_port, port); + return -EINVAL; + } + } + } + + if (general_params->host_port == ds->num_ports) { + dev_err(ds->dev, "No host port configured\n"); + return -EINVAL; + } + + return 0; +} + static int sja1105_init_general_params(struct sja1105_private *priv) { struct sja1105_general_params_entry default_general_params = { @@ -642,18 +866,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv) .hostprio = 7, .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A, .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK, - .incl_srcpt1 = false, - .send_meta1 = false, + .incl_srcpt1 = true, + .send_meta1 = true, .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B, .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK, - .incl_srcpt0 = false, - .send_meta0 = false, - /* The destination for traffic matching mac_fltres1 and - * mac_fltres0 on all ports except host_port. 
Such traffic - * receieved on host_port itself would be dropped, except - * by installing a temporary 'management route' - */ - .host_port = priv->ds->num_ports, + .incl_srcpt0 = true, + .send_meta0 = true, /* Default to an invalid value */ .mirr_port = priv->ds->num_ports, /* No TTEthernet */ @@ -673,16 +891,12 @@ static int sja1105_init_general_params(struct sja1105_private *priv) .header_type = ETH_P_SJA1110, }; struct sja1105_general_params_entry *general_params; - struct dsa_switch *ds = priv->ds; struct sja1105_table *table; - int port; + int rc; - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_cpu_port(ds, port)) { - default_general_params.host_port = port; - break; - } - } + rc = sja1105_init_topology(priv, &default_general_params); + if (rc) + return rc; table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; @@ -705,19 +919,6 @@ static int sja1105_init_general_params(struct sja1105_private *priv) sja1110_select_tdmaconfigidx(priv); - /* Link-local traffic received on casc_port will be forwarded - * to host_port without embedding the source port and device ID - * info in the destination MAC address, and no RX timestamps will be - * taken either (presumably because it is a cascaded port and a - * downstream SJA switch already did that). - * To disable the feature, we need to do different things depending on - * switch generation. On SJA1105 we need to set an invalid port, while - * on SJA1110 which support multiple cascaded ports, this field is a - * bitmask so it must be left zero. - */ - if (!priv->info->multiple_cascade_ports) - general_params->casc_port = ds->num_ports; - return 0; } @@ -837,7 +1038,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv) policing[bcast].sharindx = port; /* Only SJA1110 has multicast policers */ - if (mcast <= table->ops->max_entry_count) + if (mcast < table->ops->max_entry_count) policing[mcast].sharindx = port; } @@ -845,7 +1046,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv) for (port = 0; port < ds->num_ports; port++) { int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; - if (dsa_is_cpu_port(priv->ds, port)) + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) mtu += VLAN_HLEN; policing[port].smax = 65535; /* Burst size in bytes */ @@ -907,27 +1108,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv) return sja1105_static_config_upload(priv); } -static int sja1105_parse_rgmii_delays(struct sja1105_private *priv) +/* This is the "new way" for a MAC driver to configure its RGMII delay lines, + * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps" + * properties. It has the advantage of working with fixed links and with PHYs + * that apply RGMII delays too, and the MAC driver needs not perform any + * special checks. + * + * Previously we were acting upon the "phy-mode" property when we were + * operating in fixed-link, basically acting as a PHY, but with a reversed + * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should + * behave as if it is connected to a PHY which has applied RGMII delays in the + * TX direction. So if anything, RX delays should have been added by the MAC, + * but we were adding TX delays. + * + * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall + * back to the legacy behavior and apply delays on fixed-link ports based on + * the reverse interpretation of the phy-mode. This is a deviation from the + * expected default behavior which is to simply apply no delays. 
To achieve + * that behavior with the new bindings, it is mandatory to specify + * "{rx,tx}-internal-delay-ps" with a value of 0. + */ +static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port, + struct device_node *port_dn) { - struct dsa_switch *ds = priv->ds; - int port; + phy_interface_t phy_mode = priv->phy_mode[port]; + struct device *dev = &priv->spidev->dev; + int rx_delay = -1, tx_delay = -1; - for (port = 0; port < ds->num_ports; port++) { - if (!priv->fixed_link[port]) - continue; + if (!phy_interface_mode_is_rgmii(phy_mode)) + return 0; - if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID || - priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID) - priv->rgmii_rx_delay[port] = true; + of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay); + of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay); - if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID || - priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID) - priv->rgmii_tx_delay[port] = true; + if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) { + dev_warn(dev, + "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, " + "please update device tree to specify \"rx-internal-delay-ps\" and " + "\"tx-internal-delay-ps\"", + port); - if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) && - !priv->info->setup_rgmii_delay) - return -EINVAL; + if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || + phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + rx_delay = 2000; + + if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || + phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + tx_delay = 2000; } + + if (rx_delay < 0) + rx_delay = 0; + if (tx_delay < 0) + tx_delay = 0; + + if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) { + dev_err(dev, "Chip cannot apply RGMII delays\n"); + return -EINVAL; + } + + if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) || + (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) || + (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) || + (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) { + dev_err(dev, + "port %d RGMII delay values out of range, must be between %d and %d ps\n", + port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS); + return -ERANGE; + } + + priv->rgmii_rx_delay_ps[port] = rx_delay; + priv->rgmii_tx_delay_ps[port] = tx_delay; + return 0; } @@ -935,9 +1187,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, struct device_node *ports_node) { struct device *dev = &priv->spidev->dev; - struct device_node *child; - for_each_available_child_of_node(ports_node, child) { + for_each_available_child_of_node_scoped(ports_node, child) { struct device_node *phy_node; phy_interface_t phy_mode; u32 index; @@ -947,7 +1198,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, if (of_property_read_u32(child, "reg", &index) < 0) { dev_err(dev, "Port number not defined in device tree " "(property \"reg\")\n"); - of_node_put(child); return -ENODEV; } @@ -957,7 +1207,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n", index); - of_node_put(child); return -ENODEV; } @@ -966,7 +1215,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, if (!of_phy_is_fixed_link(child)) { dev_err(dev, "phy-handle or fixed-link " "properties missing!\n"); - of_node_put(child); return -ENODEV; } /* phy-handle is missing, but fixed-link isn't. 
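The fallback described in the comment above (apply legacy 2000 ps delays derived from the reversed phy-mode interpretation only when a fixed-link port specifies neither "rx-internal-delay-ps" nor "tx-internal-delay-ps") can be checked with a small standalone model. This is a sketch only; the enum values and function names are made up and the -1 sentinel simply stands in for "property absent in the device tree".

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's phy_interface_t RGMII variants. */
enum example_rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

/* Models the fallback in sja1105_parse_rgmii_delays(): on a fixed-link
 * port with no explicit delay properties, derive 2000 ps delays from the
 * (reversed) phy-mode; otherwise a missing property means 0 ps.
 */
static void example_rgmii_delays(enum example_rgmii_mode mode, bool fixed_link,
				 int dt_rx_ps, int dt_tx_ps, /* -1 if absent */
				 int *rx_ps, int *tx_ps)
{
	if (dt_rx_ps == -1 && dt_tx_ps == -1 && fixed_link) {
		dt_rx_ps = (mode == RGMII_RXID || mode == RGMII_ID) ? 2000 : 0;
		dt_tx_ps = (mode == RGMII_TXID || mode == RGMII_ID) ? 2000 : 0;
	}

	*rx_ps = dt_rx_ps < 0 ? 0 : dt_rx_ps;
	*tx_ps = dt_tx_ps < 0 ? 0 : dt_tx_ps;
}

int main(void)
{
	int rx, tx;

	/* Legacy DT: fixed-link, phy-mode = "rgmii-id", no *-internal-delay-ps */
	example_rgmii_delays(RGMII_ID, true, -1, -1, &rx, &tx);
	printf("legacy fallback: rx %d ps, tx %d ps\n", rx, tx);

	/* New binding: explicit rx-internal-delay-ps = 0, tx-internal-delay-ps = 0 */
	example_rgmii_delays(RGMII_ID, true, 0, 0, &rx, &tx);
	printf("explicit zeros:  rx %d ps, tx %d ps\n", rx, tx);
	return 0;
}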
@@ -978,6 +1226,10 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, } priv->phy_mode[index] = phy_mode; + + err = sja1105_parse_rgmii_delays(priv, index, child); + if (err) + return err; } return 0; @@ -1004,29 +1256,11 @@ static int sja1105_parse_dt(struct sja1105_private *priv) return rc; } -/* Convert link speed from SJA1105 to ethtool encoding */ -static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv, - u64 speed) -{ - if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) - return SPEED_10; - if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) - return SPEED_100; - if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) - return SPEED_1000; - if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS]) - return SPEED_2500; - return SPEED_UNKNOWN; -} - -/* Set link speed in the MAC configuration for a specific port. */ -static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, - int speed_mbps) +static int sja1105_set_port_speed(struct sja1105_private *priv, int port, + int speed_mbps) { struct sja1105_mac_config_entry *mac; - struct device *dev = priv->ds->dev; u64 speed; - int rc; /* On P/Q/R/S, one can read from the device via the MAC reconfiguration * tables. On E/T, MAC reconfig tables are not readable, only writable. @@ -1060,7 +1294,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; break; default: - dev_err(dev, "Invalid speed %iMbps\n", speed_mbps); + dev_err(priv->ds->dev, "Invalid speed %iMbps\n", speed_mbps); return -EINVAL; } @@ -1068,15 +1302,28 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, * table, since this will be used for the clocking setup, and we no * longer need to store it in the static config (already told hardware * we want auto during upload phase). - * Actually for the SGMII port, the MAC is fixed at 1 Gbps and - * we need to configure the PCS only (if even that). */ - if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII) - mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS]; - else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX) - mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS]; - else - mac[port].speed = speed; + mac[port].speed = speed; + + return 0; +} + +/* Write the MAC Configuration Table entry and, if necessary, the CGU settings, + * after a link speedchange for this port. + */ +static int sja1105_set_port_config(struct sja1105_private *priv, int port) +{ + struct sja1105_mac_config_entry *mac; + struct device *dev = priv->ds->dev; + int rc; + + /* On P/Q/R/S, one can read from the device via the MAC reconfiguration + * tables. On E/T, MAC reconfig tables are not readable, only writable. + * We have to *know* what the MAC looks like. For the sake of keeping + * the code common, we'll use the static configuration tables as a + * reasonable approximation for both E/T and P/Q/R/S. + */ + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; /* Write to the dynamic reconfiguration tables */ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, @@ -1098,103 +1345,88 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, return sja1105_clocking_setup_port(priv, port); } -/* The SJA1105 MAC programming model is through the static config (the xMII - * Mode table cannot be dynamically reconfigured), and we have to program - * that early (earlier than PHYLINK calls us, anyway). 
- * So just error out in case the connected PHY attempts to change the initial - * system interface MII protocol from what is defined in the DT, at least for - * now. - */ -static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port, - phy_interface_t interface) +static struct phylink_pcs * +sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) { - return priv->phy_mode[port] != interface; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct sja1105_private *priv = dp->ds->priv; + + return priv->pcs[dp->index]; } -static void sja1105_mac_config(struct dsa_switch *ds, int port, +static void sja1105_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct dsa_port *dp = dsa_to_port(ds, port); - struct sja1105_private *priv = ds->priv; - struct dw_xpcs *xpcs; - - if (sja1105_phy_mode_mismatch(priv, port, state->interface)) { - dev_err(ds->dev, "Changing PHY mode to %s not supported!\n", - phy_modes(state->interface)); - return; - } - - xpcs = priv->xpcs[port]; - - if (xpcs) - phylink_set_pcs(dp->pl, &xpcs->pcs); } -static void sja1105_mac_link_down(struct dsa_switch *ds, int port, +static void sja1105_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - sja1105_inhibit_tx(ds->priv, BIT(port), true); + struct dsa_port *dp = dsa_phylink_to_port(config); + + sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true); } -static void sja1105_mac_link_up(struct dsa_switch *ds, int port, +static void sja1105_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct sja1105_private *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct sja1105_private *priv = dp->ds->priv; + int port = dp->index; - sja1105_adjust_port_config(priv, port, speed); + if (!sja1105_set_port_speed(priv, port, speed)) + sja1105_set_port_config(priv, port); sja1105_inhibit_tx(priv, BIT(port), false); } -static void sja1105_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { - /* Construct a new mask which exhaustively contains all link features - * supported by the MAC, and then apply that (logical AND) to what will - * be sent to the PHY for "marketing". - */ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct sja1105_private *priv = ds->priv; struct sja1105_xmii_params_entry *mii; + phy_interface_t phy_mode; + + phy_mode = priv->phy_mode[port]; + if (phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_2500BASEX) { + /* Changing the PHY mode on SERDES ports is possible and makes + * sense, because that is done through the XPCS. We allow + * changes between SGMII and 2500base-X. + */ + if (priv->info->supports_sgmii[port]) + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); - mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; - - /* include/linux/phylink.h says: - * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink - * expects the MAC driver to return all supported link modes. 
- */ - if (state->interface != PHY_INTERFACE_MODE_NA && - sja1105_phy_mode_mismatch(priv, port, state->interface)) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - return; + if (priv->info->supports_2500basex[port]) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + } else { + /* The SJA1105 MAC programming model is through the static + * config (the xMII Mode table cannot be dynamically + * reconfigured), and we have to program that early. + */ + __set_bit(phy_mode, config->supported_interfaces); } /* The MAC does not support pause frames, and also doesn't * support half-duplex traffic modes. */ - phylink_set(mask, Autoneg); - phylink_set(mask, MII); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 100baseT1_Full); + config->mac_capabilities = MAC_10FD | MAC_100FD; + + mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; if (mii->xmii_mode[port] == XMII_MODE_RGMII || mii->xmii_mode[port] == XMII_MODE_SGMII) - phylink_set(mask, 1000baseT_Full); - if (priv->info->supports_2500basex[port]) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } + config->mac_capabilities |= MAC_1000FD; - bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + if (priv->info->supports_2500basex[port]) + config->mac_capabilities |= MAC_2500FD; } static int @@ -1314,10 +1546,11 @@ static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin, int sja1105et_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { - struct sja1105_l2_lookup_entry l2_lookup = {0}; + struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp; struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; int last_unused = -1; + int start, end, i; int bin, way, rc; bin = sja1105et_fdb_hash(priv, addr, vid); @@ -1329,7 +1562,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port, * mask? If yes, we need to do nothing. If not, we need * to rewrite the entry by adding this port to it. 
*/ - if (l2_lookup.destports & BIT(port)) + if ((l2_lookup.destports & BIT(port)) && l2_lookup.lockeds) return 0; l2_lookup.destports |= BIT(port); } else { @@ -1360,6 +1593,7 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port, index, NULL, false); } } + l2_lookup.lockeds = true; l2_lookup.index = sja1105et_fdb_index(bin, way); rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, @@ -1368,6 +1602,29 @@ int sja1105et_fdb_add(struct dsa_switch *ds, int port, if (rc < 0) return rc; + /* Invalidate a dynamically learned entry if that exists */ + start = sja1105et_fdb_index(bin, 0); + end = sja1105et_fdb_index(bin, way); + + for (i = start; i < end; i++) { + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, + i, &tmp); + if (rc == -ENOENT) + continue; + if (rc) + return rc; + + if (tmp.macaddr != ether_addr_to_u64(addr) || tmp.vlanid != vid) + continue; + + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + i, NULL, false); + if (rc) + return rc; + + break; + } + return sja1105_static_fdb_change(priv, port, &l2_lookup, true); } @@ -1409,32 +1666,30 @@ int sja1105et_fdb_del(struct dsa_switch *ds, int port, int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { - struct sja1105_l2_lookup_entry l2_lookup = {0}; + struct sja1105_l2_lookup_entry l2_lookup = {0}, tmp; struct sja1105_private *priv = ds->priv; int rc, i; /* Search for an existing entry in the FDB table */ l2_lookup.macaddr = ether_addr_to_u64(addr); l2_lookup.vlanid = vid; - l2_lookup.iotag = SJA1105_S_TAG; l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); - if (priv->vlan_state != SJA1105_VLAN_UNAWARE) { - l2_lookup.mask_vlanid = VLAN_VID_MASK; - l2_lookup.mask_iotag = BIT(0); - } else { - l2_lookup.mask_vlanid = 0; - l2_lookup.mask_iotag = 0; - } + l2_lookup.mask_vlanid = VLAN_VID_MASK; l2_lookup.destports = BIT(port); + tmp = l2_lookup; + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, - SJA1105_SEARCH, &l2_lookup); - if (rc == 0) { - /* Found and this port is already in the entry's + SJA1105_SEARCH, &tmp); + if (rc == 0 && tmp.index != SJA1105_MAX_L2_LOOKUP_COUNT - 1) { + /* Found a static entry and this port is already in the entry's * port mask => job done */ - if (l2_lookup.destports & BIT(port)) + if ((tmp.destports & BIT(port)) && tmp.lockeds) return 0; + + l2_lookup = tmp; + /* l2_lookup.index is populated by the switch in case it * found something. */ @@ -1456,16 +1711,46 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, dev_err(ds->dev, "FDB is full, cannot add entry.\n"); return -EINVAL; } - l2_lookup.lockeds = true; l2_lookup.index = i; skip_finding_an_index: + l2_lookup.lockeds = true; + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, l2_lookup.index, &l2_lookup, true); if (rc < 0) return rc; + /* The switch learns dynamic entries and looks up the FDB left to + * right. It is possible that our addition was concurrent with the + * dynamic learning of the same address, so now that the static entry + * has been installed, we are certain that address learning for this + * particular address has been turned off, so the dynamic entry either + * is in the FDB at an index smaller than the static one, or isn't (it + * can also be at a larger index, but in that case it is inactive + * because the static FDB entry will match first, and the dynamic one + * will eventually age out). Search for a dynamically learned address + * prior to our static one and invalidate it. 
+ */ + tmp = l2_lookup; + + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, + SJA1105_SEARCH, &tmp); + if (rc < 0) { + dev_err(ds->dev, + "port %d failed to read back entry for %pM vid %d: %pe\n", + port, addr, vid, ERR_PTR(rc)); + return rc; + } + + if (tmp.index < l2_lookup.index) { + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + tmp.index, NULL, false); + if (rc < 0) + return rc; + } + return sja1105_static_fdb_change(priv, port, &l2_lookup, true); } @@ -1479,15 +1764,8 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, l2_lookup.macaddr = ether_addr_to_u64(addr); l2_lookup.vlanid = vid; - l2_lookup.iotag = SJA1105_S_TAG; l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); - if (priv->vlan_state != SJA1105_VLAN_UNAWARE) { - l2_lookup.mask_vlanid = VLAN_VID_MASK; - l2_lookup.mask_iotag = BIT(0); - } else { - l2_lookup.mask_vlanid = 0; - l2_lookup.mask_iotag = 0; - } + l2_lookup.mask_vlanid = VLAN_VID_MASK; l2_lookup.destports = BIT(port); rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, @@ -1514,36 +1792,68 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, } static int sja1105_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct sja1105_private *priv = ds->priv; + int rc; - /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, - * so the switch still does some VLAN processing internally. - * But Shared VLAN Learning (SVL) is also active, and it will take - * care of autonomous forwarding between the unique pvid's of each - * port. Here we just make sure that users can't add duplicate FDB - * entries when in this mode - the actual VID doesn't matter except - * for what gets printed in 'bridge fdb show'. In the case of zero, - * no VID gets printed at all. 
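The static-versus-dynamic FDB interaction explained a little earlier (the switch resolves lookups left to right, so a concurrently learned dynamic entry at a lower index would shadow the freshly installed static one and must be invalidated) can be modelled outside the kernel with a few lines. This is an illustrative sketch with made-up types, not the driver's FDB code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FDB_SIZE 8

/* Hypothetical, simplified FDB entry: only the fields the example needs. */
struct example_fdb_entry {
	bool     valid;
	bool     lockeds;   /* static (locked) entry */
	uint64_t macaddr;
	uint16_t vlanid;
};

/* Mirror the pattern added to sja1105pqrs_fdb_add(): search for a dynamic
 * duplicate sitting before the static entry and invalidate it, because it
 * would otherwise match first.
 */
static void example_invalidate_dynamic_shadow(struct example_fdb_entry *fdb,
					      int static_index)
{
	const struct example_fdb_entry *st = &fdb[static_index];
	int i;

	for (i = 0; i < static_index; i++) {
		if (!fdb[i].valid || fdb[i].lockeds)
			continue;
		if (fdb[i].macaddr != st->macaddr || fdb[i].vlanid != st->vlanid)
			continue;

		fdb[i].valid = false;	/* dynamic entry would match first: drop it */
		break;
	}
}

int main(void)
{
	struct example_fdb_entry fdb[EXAMPLE_FDB_SIZE] = {
		[1] = { .valid = true, .macaddr = 0x001122334455ULL, .vlanid = 1 },
		[4] = { .valid = true, .lockeds = true,
			.macaddr = 0x001122334455ULL, .vlanid = 1 },
	};

	example_invalidate_dynamic_shadow(fdb, 4);
	printf("dynamic duplicate still valid: %d\n", fdb[1].valid);
	return 0;
}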
- */ - if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) - vid = 0; + if (!vid) { + switch (db.type) { + case DSA_DB_PORT: + vid = dsa_tag_8021q_standalone_vid(db.dp); + break; + case DSA_DB_BRIDGE: + vid = dsa_tag_8021q_bridge_vid(db.bridge.num); + break; + default: + return -EOPNOTSUPP; + } + } + + mutex_lock(&priv->fdb_lock); + rc = priv->info->fdb_add_cmd(ds, port, addr, vid); + mutex_unlock(&priv->fdb_lock); - return priv->info->fdb_add_cmd(ds, port, addr, vid); + return rc; } -static int sja1105_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +static int __sja1105_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) { struct sja1105_private *priv = ds->priv; - if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL) - vid = 0; + if (!vid) { + switch (db.type) { + case DSA_DB_PORT: + vid = dsa_tag_8021q_standalone_vid(db.dp); + break; + case DSA_DB_BRIDGE: + vid = dsa_tag_8021q_bridge_vid(db.bridge.num); + break; + default: + return -EOPNOTSUPP; + } + } return priv->info->fdb_del_cmd(ds, port, addr, vid); } +static int sja1105_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct sja1105_private *priv = ds->priv; + int rc; + + mutex_lock(&priv->fdb_lock); + rc = __sja1105_fdb_del(ds, port, addr, vid, db); + mutex_unlock(&priv->fdb_lock); + + return rc; +} + static int sja1105_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -1575,31 +1885,88 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, if (!(l2_lookup.destports & BIT(port))) continue; - /* We need to hide the FDB entry for unknown multicast */ - if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST && - l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST) - continue; - u64_to_ether_addr(l2_lookup.macaddr, macaddr); + /* Hardware FDB is shared for fdb and mdb, "bridge fdb show" + * only wants to see unicast + */ + if (is_multicast_ether_addr(macaddr)) + continue; + /* We need to hide the dsa_8021q VLANs from the user. 
*/ - if (priv->vlan_state == SJA1105_VLAN_UNAWARE) + if (vid_is_dsa_8021q(l2_lookup.vlanid)) l2_lookup.vlanid = 0; - cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); + rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); + if (rc) + return rc; } return 0; } +static void sja1105_fast_age(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct sja1105_private *priv = ds->priv; + struct dsa_db db = { + .type = DSA_DB_BRIDGE, + .bridge = { + .dev = dsa_port_bridge_dev_get(dp), + .num = dsa_port_bridge_num_get(dp), + }, + }; + int i; + + mutex_lock(&priv->fdb_lock); + + for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { + struct sja1105_l2_lookup_entry l2_lookup = {0}; + u8 macaddr[ETH_ALEN]; + int rc; + + rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, + i, &l2_lookup); + /* No fdb entry at i, not an issue */ + if (rc == -ENOENT) + continue; + if (rc) { + dev_err(ds->dev, "Failed to read FDB: %pe\n", + ERR_PTR(rc)); + break; + } + + if (!(l2_lookup.destports & BIT(port))) + continue; + + /* Don't delete static FDB entries */ + if (l2_lookup.lockeds) + continue; + + u64_to_ether_addr(l2_lookup.macaddr, macaddr); + + rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db); + if (rc) { + dev_err(ds->dev, + "Failed to delete FDB entry %pM vid %lld: %pe\n", + macaddr, l2_lookup.vlanid, ERR_PTR(rc)); + break; + } + } + + mutex_unlock(&priv->fdb_lock); +} + static int sja1105_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { - return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid); + return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db); } static int sja1105_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { - return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid); + return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db); } /* Common function for unicast and broadcast flood configuration. @@ -1647,7 +2014,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv) } static int sja1105_bridge_member(struct dsa_switch *ds, int port, - struct net_device *br, bool member) + struct dsa_bridge bridge, bool member) { struct sja1105_l2_forwarding_entry *l2_fwd; struct sja1105_private *priv = ds->priv; @@ -1672,7 +2039,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port, */ if (i == port) continue; - if (dsa_to_port(ds, i)->bridge_dev != br) + if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) continue; sja1105_port_allow_traffic(l2_fwd, i, port, member); sja1105_port_allow_traffic(l2_fwd, port, i, member); @@ -1688,12 +2055,17 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port, if (rc) return rc; + rc = sja1105_commit_pvid(ds, port); + if (rc) + return rc; + return sja1105_manage_flood_domains(priv); } static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, u8 state) { + struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; struct sja1105_mac_config_entry *mac; @@ -1702,6 +2074,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: /* From UM10944 description of DRPDTAG (why put this there?): * "Management traffic flows to the port regardless of the state * of the INGRESS flag". So BPDUs are still be allowed to pass. 
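The bridge STP handling around this hunk reduces to a small mapping from port state to the MAC Configuration Table bits. The sketch below only restates that mapping under the same assumptions (DISABLED/BLOCKING/LISTENING block data but management traffic such as BPDUs still reaches the port, LEARNING enables ingress and learning, FORWARDING enables everything) and uses made-up types; learning is granted only if the bridge enabled it on the port.

#include <stdbool.h>
#include <stdio.h>

enum example_stp_state { DISABLED, BLOCKING, LISTENING, LEARNING, FORWARDING };

struct example_mac_bits {
	bool ingress;
	bool egress;
	bool dyn_learn;
};

/* Same mapping as sja1105_bridge_stp_state_set(), with dp->learning
 * modelled by the port_learning argument.
 */
static struct example_mac_bits example_stp_to_mac(enum example_stp_state state,
						  bool port_learning)
{
	struct example_mac_bits m = { false, false, false };

	switch (state) {
	case DISABLED:
	case BLOCKING:
	case LISTENING:
		/* Management traffic (BPDUs) still reaches the port. */
		break;
	case LEARNING:
		m.ingress = true;
		m.dyn_learn = port_learning;
		break;
	case FORWARDING:
		m.ingress = true;
		m.egress = true;
		m.dyn_learn = port_learning;
		break;
	}
	return m;
}

int main(void)
{
	struct example_mac_bits m = example_stp_to_mac(LEARNING, true);

	printf("ingress %d egress %d dyn_learn %d\n",
	       m.ingress, m.egress, m.dyn_learn);
	return 0;
}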
@@ -1711,20 +2084,15 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, mac[port].egress = false; mac[port].dyn_learn = false; break; - case BR_STATE_LISTENING: - mac[port].ingress = true; - mac[port].egress = false; - mac[port].dyn_learn = false; - break; case BR_STATE_LEARNING: mac[port].ingress = true; mac[port].egress = false; - mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); + mac[port].dyn_learn = dp->learning; break; case BR_STATE_FORWARDING: mac[port].ingress = true; mac[port].egress = true; - mac[port].dyn_learn = !!(priv->learn_ena & BIT(port)); + mac[port].dyn_learn = dp->learning; break; default: dev_err(ds->dev, "invalid STP state: %d\n", state); @@ -1736,23 +2104,63 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, } static int sja1105_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { - return sja1105_bridge_member(ds, port, br, true); + int rc; + + rc = sja1105_bridge_member(ds, port, bridge, true); + if (rc) + return rc; + + rc = dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload, + extack); + if (rc) { + sja1105_bridge_member(ds, port, bridge, false); + return rc; + } + + return 0; } static void sja1105_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge) { - sja1105_bridge_member(ds, port, br, false); + dsa_tag_8021q_bridge_leave(ds, port, bridge); + sja1105_bridge_member(ds, port, bridge, false); } -#define BYTES_PER_KBIT (1000LL / 8) +/* Port 0 (the uC port) does not have CBS shapers */ +#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio)) + +static int sja1105_find_cbs_shaper(struct sja1105_private *priv, + int port, int prio) +{ + int i; + + if (priv->info->fixed_cbs_mapping) { + i = SJA1110_FIXED_CBS(port, prio); + if (i >= 0 && i < priv->info->num_cbs_shapers) + return i; + + return -1; + } + + for (i = 0; i < priv->info->num_cbs_shapers; i++) + if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) + return i; + + return -1; +} static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv) { int i; + if (priv->info->fixed_cbs_mapping) + return -1; + for (i = 0; i < priv->info->num_cbs_shapers; i++) if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope) return i; @@ -1783,14 +2191,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; struct sja1105_cbs_entry *cbs; + s64 port_transmit_rate_kbps; int index; if (!offload->enable) return sja1105_delete_cbs_shaper(priv, port, offload->queue); - index = sja1105_find_unused_cbs_shaper(priv); - if (index < 0) - return -ENOSPC; + /* The user may be replacing an existing shaper */ + index = sja1105_find_cbs_shaper(priv, port, offload->queue); + if (index < 0) { + /* That isn't the case - see if we can allocate a new one */ + index = sja1105_find_unused_cbs_shaper(priv); + if (index < 0) + return -ENOSPC; + } cbs = &priv->cbs[index]; cbs->port = port; @@ -1800,9 +2214,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, */ cbs->credit_hi = offload->hicredit; cbs->credit_lo = abs(offload->locredit); - /* User space is in kbits/sec, hardware in bytes/sec */ - cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT; - cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT); + /* User space is in kbits/sec, while the hardware in bytes/sec times + * link speed. 
Since the given offload->sendslope is good only for the + * current link speed anyway, and user space is likely to reprogram it + * when that changes, don't even bother to track the port's link speed, + * but deduce the port transmit rate from idleslope - sendslope. + */ + port_transmit_rate_kbps = offload->idleslope - offload->sendslope; + cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, + port_transmit_rate_kbps); + cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), + port_transmit_rate_kbps); /* Convert the negative values from 64-bit 2's complement * to 32-bit 2's complement (for the case of 0x80000000 whose * negative is still negative). @@ -1841,7 +2263,6 @@ static int sja1105_reload_cbs(struct sja1105_private *priv) static const char * const sja1105_reset_reasons[] = { [SJA1105_VLAN_FILTERING] = "VLAN filtering", - [SJA1105_RX_HWTSTAMPING] = "RX timestamping", [SJA1105_AGEING_TIME] = "Ageing time", [SJA1105_SCHEDULING] = "Time-aware scheduling", [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing", @@ -1859,8 +2280,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv, { struct ptp_system_timestamp ptp_sts_before; struct ptp_system_timestamp ptp_sts_after; - int speed_mbps[SJA1105_MAX_NUM_PORTS]; u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0}; + u64 mac_speed[SJA1105_MAX_NUM_PORTS]; struct sja1105_mac_config_entry *mac; struct dsa_switch *ds = priv->ds; s64 t1, t2, t3, t4; @@ -1868,24 +2289,23 @@ int sja1105_static_config_reload(struct sja1105_private *priv, int rc, i; s64 now; + mutex_lock(&priv->fdb_lock); mutex_lock(&priv->mgmt_lock); mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; - /* Back up the dynamic link speed changed by sja1105_adjust_port_config + /* Back up the dynamic link speed changed by sja1105_set_port_speed() * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the * switch wants to see in the static config in order to allow us to * change it through the dynamic interface later. 
*/ for (i = 0; i < ds->num_ports; i++) { - u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1); - - speed_mbps[i] = sja1105_port_speed_to_ethtool(priv, - mac[i].speed); + mac_speed[i] = mac[i].speed; mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; - if (priv->xpcs[i]) - bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr); + if (priv->pcs[i]) + bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, + MDIO_MMD_VEND2, MDIO_CTRL1); } /* No PTP operations can run right now */ @@ -1940,28 +2360,28 @@ int sja1105_static_config_reload(struct sja1105_private *priv, } for (i = 0; i < ds->num_ports; i++) { - struct dw_xpcs *xpcs = priv->xpcs[i]; - unsigned int mode; + struct phylink_pcs *pcs = priv->pcs[i]; + unsigned int neg_mode; - rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]); + mac[i].speed = mac_speed[i]; + rc = sja1105_set_port_config(priv, i); if (rc < 0) goto out; - if (!xpcs) + if (!pcs) continue; if (bmcr[i] & BMCR_ANENABLE) - mode = MLO_AN_INBAND; - else if (priv->fixed_link[i]) - mode = MLO_AN_FIXED; + neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED; else - mode = MLO_AN_PHY; + neg_mode = PHYLINK_PCS_NEG_OUTBAND; - rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode); + rc = pcs->ops->pcs_config(pcs, neg_mode, priv->phy_mode[i], + NULL, true); if (rc < 0) goto out; - if (!phylink_autoneg_inband(mode)) { + if (neg_mode == PHYLINK_PCS_NEG_OUTBAND) { int speed = SPEED_UNKNOWN; if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX) @@ -1973,8 +2393,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv, else speed = SPEED_10; - xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i], - speed, DUPLEX_FULL); + pcs->ops->pcs_link_up(pcs, neg_mode, priv->phy_mode[i], + speed, DUPLEX_FULL); } } @@ -1983,101 +2403,11 @@ int sja1105_static_config_reload(struct sja1105_private *priv, goto out; out: mutex_unlock(&priv->mgmt_lock); + mutex_unlock(&priv->fdb_lock); return rc; } -static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid) -{ - struct sja1105_mac_config_entry *mac; - - mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; - - mac[port].vlanid = pvid; - - return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, - &mac[port], true); -} - -static int sja1105_crosschip_bridge_join(struct dsa_switch *ds, - int tree_index, int sw_index, - int other_port, struct net_device *br) -{ - struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index); - struct sja1105_private *other_priv = other_ds->priv; - struct sja1105_private *priv = ds->priv; - int port, rc; - - if (other_ds->ops != &sja1105_switch_ops) - return 0; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_user_port(ds, port)) - continue; - if (dsa_to_port(ds, port)->bridge_dev != br) - continue; - - rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx, - port, - other_priv->dsa_8021q_ctx, - other_port); - if (rc) - return rc; - - rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx, - other_port, - priv->dsa_8021q_ctx, - port); - if (rc) - return rc; - } - - return 0; -} - -static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds, - int tree_index, int sw_index, - int other_port, - struct net_device *br) -{ - struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index); - struct sja1105_private *other_priv = other_ds->priv; - struct sja1105_private *priv = ds->priv; - int port; - - if (other_ds->ops != &sja1105_switch_ops) - return; - - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_user_port(ds, port)) - continue; - if 
(dsa_to_port(ds, port)->bridge_dev != br) - continue; - - dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port, - other_priv->dsa_8021q_ctx, - other_port); - - dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx, - other_port, - priv->dsa_8021q_ctx, port); - } -} - -static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled) -{ - struct sja1105_private *priv = ds->priv; - int rc; - - rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled); - if (rc) - return rc; - - dev_info(ds->dev, "%s switch tagging\n", - enabled ? "Enabled" : "Disabled"); - return 0; -} - static enum dsa_tag_protocol sja1105_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) @@ -2087,669 +2417,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port, return priv->info->tag_proto; } -static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid) -{ - int subvlan; - - if (pvid) - return 0; - - for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - if (subvlan_map[subvlan] == VLAN_N_VID) - return subvlan; - - return -1; -} - -static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid) -{ - int subvlan; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - if (subvlan_map[subvlan] == vid) - return subvlan; - - return -1; -} - -static int sja1105_find_committed_subvlan(struct sja1105_private *priv, - int port, u16 vid) -{ - struct sja1105_port *sp = &priv->ports[port]; - - return sja1105_find_subvlan(sp->subvlan_map, vid); -} - -static void sja1105_init_subvlan_map(u16 *subvlan_map) -{ - int subvlan; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - subvlan_map[subvlan] = VLAN_N_VID; -} - -static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port, - u16 *subvlan_map) -{ - struct sja1105_port *sp = &priv->ports[port]; - int subvlan; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - sp->subvlan_map[subvlan] = subvlan_map[subvlan]; -} - -static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid) -{ - struct sja1105_vlan_lookup_entry *vlan; - int count, i; - - vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries; - count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count; - - for (i = 0; i < count; i++) - if (vlan[i].vlanid == vid) - return i; - - /* Return an invalid entry index if not found */ - return -1; -} - -static int -sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging, - int count, int from_port, u16 from_vid, - u16 to_vid) -{ - int i; - - for (i = 0; i < count; i++) - if (retagging[i].ing_port == BIT(from_port) && - retagging[i].vlan_ing == from_vid && - retagging[i].vlan_egr == to_vid) - return i; - - /* Return an invalid entry index if not found */ - return -1; -} - -static int sja1105_commit_vlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan, - struct sja1105_retagging_entry *new_retagging, - int num_retagging) -{ - struct sja1105_retagging_entry *retagging; - struct sja1105_vlan_lookup_entry *vlan; - struct sja1105_table *table; - int num_vlans = 0; - int rc, i, k = 0; - - /* VLAN table */ - table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; - vlan = table->entries; - - for (i = 0; i < VLAN_N_VID; i++) { - int match = sja1105_is_vlan_configured(priv, i); - - if (new_vlan[i].vlanid != VLAN_N_VID) - num_vlans++; - - if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) { - /* Was there before, no longer is. 
Delete */ - dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i); - rc = sja1105_dynamic_config_write(priv, - BLK_IDX_VLAN_LOOKUP, - i, &vlan[match], false); - if (rc < 0) - return rc; - } else if (new_vlan[i].vlanid != VLAN_N_VID) { - /* Nothing changed, don't do anything */ - if (match >= 0 && - vlan[match].vlanid == new_vlan[i].vlanid && - vlan[match].tag_port == new_vlan[i].tag_port && - vlan[match].vlan_bc == new_vlan[i].vlan_bc && - vlan[match].vmemb_port == new_vlan[i].vmemb_port) - continue; - /* Update entry */ - dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i); - rc = sja1105_dynamic_config_write(priv, - BLK_IDX_VLAN_LOOKUP, - i, &new_vlan[i], - true); - if (rc < 0) - return rc; - } - } - - if (table->entry_count) - kfree(table->entries); - - table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size, - GFP_KERNEL); - if (!table->entries) - return -ENOMEM; - - table->entry_count = num_vlans; - vlan = table->entries; - - for (i = 0; i < VLAN_N_VID; i++) { - if (new_vlan[i].vlanid == VLAN_N_VID) - continue; - vlan[k++] = new_vlan[i]; - } - - /* VLAN Retagging Table */ - table = &priv->static_config.tables[BLK_IDX_RETAGGING]; - retagging = table->entries; - - for (i = 0; i < table->entry_count; i++) { - rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING, - i, &retagging[i], false); - if (rc) - return rc; - } - - if (table->entry_count) - kfree(table->entries); - - table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size, - GFP_KERNEL); - if (!table->entries) - return -ENOMEM; - - table->entry_count = num_retagging; - retagging = table->entries; - - for (i = 0; i < num_retagging; i++) { - retagging[i] = new_retagging[i]; - - /* Update entry */ - rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING, - i, &retagging[i], true); - if (rc < 0) - return rc; - } - - return 0; -} - -struct sja1105_crosschip_vlan { - struct list_head list; - u16 vid; - bool untagged; - int port; - int other_port; - struct dsa_8021q_context *other_ctx; -}; - -struct sja1105_crosschip_switch { - struct list_head list; - struct dsa_8021q_context *other_ctx; -}; - -static int sja1105_commit_pvid(struct sja1105_private *priv) -{ - struct sja1105_bridge_vlan *v; - struct list_head *vlan_list; - int rc = 0; - - if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) - vlan_list = &priv->bridge_vlans; - else - vlan_list = &priv->dsa_8021q_vlans; - - list_for_each_entry(v, vlan_list, list) { - if (v->pvid) { - rc = sja1105_pvid_apply(priv, v->port, v->vid); - if (rc) - break; - } - } - - return rc; -} - -static int -sja1105_build_bridge_vlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan) -{ - struct sja1105_bridge_vlan *v; - - if (priv->vlan_state == SJA1105_VLAN_UNAWARE) - return 0; - - list_for_each_entry(v, &priv->bridge_vlans, list) { - int match = v->vid; - - new_vlan[match].vlanid = v->vid; - new_vlan[match].vmemb_port |= BIT(v->port); - new_vlan[match].vlan_bc |= BIT(v->port); - if (!v->untagged) - new_vlan[match].tag_port |= BIT(v->port); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - } - - return 0; -} - -static int -sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan) -{ - struct sja1105_bridge_vlan *v; - - if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) - return 0; - - list_for_each_entry(v, &priv->dsa_8021q_vlans, list) { - int match = v->vid; - - new_vlan[match].vlanid = v->vid; - new_vlan[match].vmemb_port |= BIT(v->port); - new_vlan[match].vlan_bc |= BIT(v->port); - if (!v->untagged) 
- new_vlan[match].tag_port |= BIT(v->port); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - } - - return 0; -} - -static int sja1105_build_subvlans(struct sja1105_private *priv, - u16 subvlan_map[][DSA_8021Q_N_SUBVLAN], - struct sja1105_vlan_lookup_entry *new_vlan, - struct sja1105_retagging_entry *new_retagging, - int *num_retagging) -{ - struct sja1105_bridge_vlan *v; - int k = *num_retagging; - - if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) - return 0; - - list_for_each_entry(v, &priv->bridge_vlans, list) { - int upstream = dsa_upstream_port(priv->ds, v->port); - int match, subvlan; - u16 rx_vid; - - /* Only sub-VLANs on user ports need to be applied. - * Bridge VLANs also include VLANs added automatically - * by DSA on the CPU port. - */ - if (!dsa_is_user_port(priv->ds, v->port)) - continue; - - subvlan = sja1105_find_subvlan(subvlan_map[v->port], - v->vid); - if (subvlan < 0) { - subvlan = sja1105_find_free_subvlan(subvlan_map[v->port], - v->pvid); - if (subvlan < 0) { - dev_err(priv->ds->dev, "No more free subvlans\n"); - return -ENOSPC; - } - } - - rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan); - - /* @v->vid on @v->port needs to be retagged to @rx_vid - * on @upstream. Assume @v->vid on @v->port and on - * @upstream was already configured by the previous - * iteration over bridge_vlans. - */ - match = rx_vid; - new_vlan[match].vlanid = rx_vid; - new_vlan[match].vmemb_port |= BIT(v->port); - new_vlan[match].vmemb_port |= BIT(upstream); - new_vlan[match].vlan_bc |= BIT(v->port); - new_vlan[match].vlan_bc |= BIT(upstream); - /* The "untagged" flag is set the same as for the - * original VLAN - */ - if (!v->untagged) - new_vlan[match].tag_port |= BIT(v->port); - /* But it's always tagged towards the CPU */ - new_vlan[match].tag_port |= BIT(upstream); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - - /* The Retagging Table generates packet *clones* with - * the new VLAN. This is a very odd hardware quirk - * which we need to suppress by dropping the original - * packet. - * Deny egress of the original VLAN towards the CPU - * port. This will force the switch to drop it, and - * we'll see only the retagged packets. - */ - match = v->vid; - new_vlan[match].vlan_bc &= ~BIT(upstream); - - /* And the retagging itself */ - new_retagging[k].vlan_ing = v->vid; - new_retagging[k].vlan_egr = rx_vid; - new_retagging[k].ing_port = BIT(v->port); - new_retagging[k].egr_port = BIT(upstream); - if (k++ == SJA1105_MAX_RETAGGING_COUNT) { - dev_err(priv->ds->dev, "No more retagging rules\n"); - return -ENOSPC; - } - - subvlan_map[v->port][subvlan] = v->vid; - } - - *num_retagging = k; - - return 0; -} - -/* Sadly, in crosschip scenarios where the CPU port is also the link to another - * switch, we should retag backwards (the dsa_8021q vid to the original vid) on - * the CPU port of neighbour switches. 
- */ -static int -sja1105_build_crosschip_subvlans(struct sja1105_private *priv, - struct sja1105_vlan_lookup_entry *new_vlan, - struct sja1105_retagging_entry *new_retagging, - int *num_retagging) -{ - struct sja1105_crosschip_vlan *tmp, *pos; - struct dsa_8021q_crosschip_link *c; - struct sja1105_bridge_vlan *v, *w; - struct list_head crosschip_vlans; - int k = *num_retagging; - int rc = 0; - - if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT) - return 0; - - INIT_LIST_HEAD(&crosschip_vlans); - - list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) { - struct sja1105_private *other_priv = c->other_ctx->ds->priv; - - if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) - continue; - - /* Crosschip links are also added to the CPU ports. - * Ignore those. - */ - if (!dsa_is_user_port(priv->ds, c->port)) - continue; - if (!dsa_is_user_port(c->other_ctx->ds, c->other_port)) - continue; - - /* Search for VLANs on the remote port */ - list_for_each_entry(v, &other_priv->bridge_vlans, list) { - bool already_added = false; - bool we_have_it = false; - - if (v->port != c->other_port) - continue; - - /* If @v is a pvid on @other_ds, it does not need - * re-retagging, because its SVL field is 0 and we - * already allow that, via the dsa_8021q crosschip - * links. - */ - if (v->pvid) - continue; - - /* Search for the VLAN on our local port */ - list_for_each_entry(w, &priv->bridge_vlans, list) { - if (w->port == c->port && w->vid == v->vid) { - we_have_it = true; - break; - } - } - - if (!we_have_it) - continue; - - list_for_each_entry(tmp, &crosschip_vlans, list) { - if (tmp->vid == v->vid && - tmp->untagged == v->untagged && - tmp->port == c->port && - tmp->other_port == v->port && - tmp->other_ctx == c->other_ctx) { - already_added = true; - break; - } - } - - if (already_added) - continue; - - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); - if (!tmp) { - dev_err(priv->ds->dev, "Failed to allocate memory\n"); - rc = -ENOMEM; - goto out; - } - tmp->vid = v->vid; - tmp->port = c->port; - tmp->other_port = v->port; - tmp->other_ctx = c->other_ctx; - tmp->untagged = v->untagged; - list_add(&tmp->list, &crosschip_vlans); - } - } - - list_for_each_entry(tmp, &crosschip_vlans, list) { - struct sja1105_private *other_priv = tmp->other_ctx->ds->priv; - int upstream = dsa_upstream_port(priv->ds, tmp->port); - int match, subvlan; - u16 rx_vid; - - subvlan = sja1105_find_committed_subvlan(other_priv, - tmp->other_port, - tmp->vid); - /* If this happens, it's a bug. The neighbour switch does not - * have a subvlan for tmp->vid on tmp->other_port, but it - * should, since we already checked for its vlan_state. - */ - if (WARN_ON(subvlan < 0)) { - rc = -EINVAL; - goto out; - } - - rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds, - tmp->other_port, - subvlan); - - /* The @rx_vid retagged from @tmp->vid on - * {@tmp->other_ds, @tmp->other_port} needs to be - * re-retagged to @tmp->vid on the way back to us. - * - * Assume the original @tmp->vid is already configured - * on this local switch, otherwise we wouldn't be - * retagging its subvlan on the other switch in the - * first place. We just need to add a reverse retagging - * rule for @rx_vid and install @rx_vid on our ports. - */ - match = rx_vid; - new_vlan[match].vlanid = rx_vid; - new_vlan[match].vmemb_port |= BIT(tmp->port); - new_vlan[match].vmemb_port |= BIT(upstream); - /* The "untagged" flag is set the same as for the - * original VLAN. 
And towards the CPU, it doesn't - * really matter, because @rx_vid will only receive - * traffic on that port. For consistency with other dsa_8021q - * VLANs, we'll keep the CPU port tagged. - */ - if (!tmp->untagged) - new_vlan[match].tag_port |= BIT(tmp->port); - new_vlan[match].tag_port |= BIT(upstream); - new_vlan[match].type_entry = SJA1110_VLAN_D_TAG; - /* Deny egress of @rx_vid towards our front-panel port. - * This will force the switch to drop it, and we'll see - * only the re-retagged packets (having the original, - * pre-initial-retagging, VLAN @tmp->vid). - */ - new_vlan[match].vlan_bc &= ~BIT(tmp->port); - - /* On reverse retagging, the same ingress VLAN goes to multiple - * ports. So we have an opportunity to create composite rules - * to not waste the limited space in the retagging table. - */ - k = sja1105_find_retagging_entry(new_retagging, *num_retagging, - upstream, rx_vid, tmp->vid); - if (k < 0) { - if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) { - dev_err(priv->ds->dev, "No more retagging rules\n"); - rc = -ENOSPC; - goto out; - } - k = (*num_retagging)++; - } - /* And the retagging itself */ - new_retagging[k].vlan_ing = rx_vid; - new_retagging[k].vlan_egr = tmp->vid; - new_retagging[k].ing_port = BIT(upstream); - new_retagging[k].egr_port |= BIT(tmp->port); - } - -out: - list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) { - list_del(&tmp->list); - kfree(tmp); - } - - return rc; -} - -static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify); - -static int sja1105_notify_crosschip_switches(struct sja1105_private *priv) -{ - struct sja1105_crosschip_switch *s, *pos; - struct list_head crosschip_switches; - struct dsa_8021q_crosschip_link *c; - int rc = 0; - - INIT_LIST_HEAD(&crosschip_switches); - - list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) { - bool already_added = false; - - list_for_each_entry(s, &crosschip_switches, list) { - if (s->other_ctx == c->other_ctx) { - already_added = true; - break; - } - } - - if (already_added) - continue; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) { - dev_err(priv->ds->dev, "Failed to allocate memory\n"); - rc = -ENOMEM; - goto out; - } - s->other_ctx = c->other_ctx; - list_add(&s->list, &crosschip_switches); - } - - list_for_each_entry(s, &crosschip_switches, list) { - struct sja1105_private *other_priv = s->other_ctx->ds->priv; - - rc = sja1105_build_vlan_table(other_priv, false); - if (rc) - goto out; - } - -out: - list_for_each_entry_safe(s, pos, &crosschip_switches, list) { - list_del(&s->list); - kfree(s); - } - - return rc; -} - -static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify) -{ - u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN]; - struct sja1105_retagging_entry *new_retagging; - struct sja1105_vlan_lookup_entry *new_vlan; - struct sja1105_table *table; - int i, num_retagging = 0; - int rc; - - table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; - new_vlan = kcalloc(VLAN_N_VID, - table->ops->unpacked_entry_size, GFP_KERNEL); - if (!new_vlan) - return -ENOMEM; - - table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; - new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT, - table->ops->unpacked_entry_size, GFP_KERNEL); - if (!new_retagging) { - kfree(new_vlan); - return -ENOMEM; - } - - for (i = 0; i < VLAN_N_VID; i++) - new_vlan[i].vlanid = VLAN_N_VID; - - for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++) - new_retagging[i].vlan_ing = VLAN_N_VID; - - for (i = 0; i < priv->ds->num_ports; i++) - 
sja1105_init_subvlan_map(subvlan_map[i]); - - /* Bridge VLANs */ - rc = sja1105_build_bridge_vlans(priv, new_vlan); - if (rc) - goto out; - - /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c: - * - RX VLANs - * - TX VLANs - * - Crosschip links - */ - rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan); - if (rc) - goto out; - - /* Private VLANs necessary for dsa_8021q operation, which we need to - * determine on our own: - * - Sub-VLANs - * - Sub-VLANs of crosschip switches - */ - rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging, - &num_retagging); - if (rc) - goto out; - - rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging, - &num_retagging); - if (rc) - goto out; - - rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging); - if (rc) - goto out; - - rc = sja1105_commit_pvid(priv); - if (rc) - goto out; - - for (i = 0; i < priv->ds->num_ports; i++) - sja1105_commit_subvlan_map(priv, i, subvlan_map[i]); - - if (notify) { - rc = sja1105_notify_crosschip_switches(priv); - if (rc) - goto out; - } - -out: - kfree(new_vlan); - kfree(new_retagging); - - return rc; -} - /* The TPID setting belongs to the General Parameters table, * which can only be partially reconfigured at runtime (and not the TPID). * So a switch reset is required. @@ -2757,13 +2424,10 @@ out: int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, struct netlink_ext_ack *extack) { - struct sja1105_l2_lookup_params_entry *l2_lookup_params; struct sja1105_general_params_entry *general_params; struct sja1105_private *priv = ds->priv; - enum sja1105_vlan_state state; struct sja1105_table *table; struct sja1105_rule *rule; - bool want_tagging; u16 tpid, tpid2; int rc; @@ -2785,382 +2449,211 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, tpid2 = ETH_P_SJA1105; } - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - - if (enabled) - sp->xmit_tpid = priv->info->qinq_tpid; - else - sp->xmit_tpid = ETH_P_SJA1105; - } - - if (!enabled) - state = SJA1105_VLAN_UNAWARE; - else if (priv->best_effort_vlan_filtering) - state = SJA1105_VLAN_BEST_EFFORT; - else - state = SJA1105_VLAN_FILTERING_FULL; - - if (priv->vlan_state == state) - return 0; - - priv->vlan_state = state; - want_tagging = (state == SJA1105_VLAN_UNAWARE || - state == SJA1105_VLAN_BEST_EFFORT); - table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ general_params->tpid = tpid; /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ general_params->tpid2 = tpid2; - /* When VLAN filtering is on, we need to at least be able to - * decode management traffic through the "backup plan". - */ - general_params->incl_srcpt1 = enabled; - general_params->incl_srcpt0 = enabled; - - want_tagging = priv->best_effort_vlan_filtering || !enabled; - - /* VLAN filtering => independent VLAN learning. - * No VLAN filtering (or best effort) => shared VLAN learning. - * - * In shared VLAN learning mode, untagged traffic still gets - * pvid-tagged, and the FDB table gets populated with entries - * containing the "real" (pvid or from VLAN tag) VLAN ID. - * However the switch performs a masked L2 lookup in the FDB, - * effectively only looking up a frame's DMAC (and not VID) for the - * forwarding decision. 
- * - * This is extremely convenient for us, because in modes with - * vlan_filtering=0, dsa_8021q actually installs unique pvid's into - * each front panel port. This is good for identification but breaks - * learning badly - the VID of the learnt FDB entry is unique, aka - * no frames coming from any other port are going to have it. So - * for forwarding purposes, this is as though learning was broken - * (all frames get flooded). - */ - table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; - l2_lookup_params = table->entries; - l2_lookup_params->shared_learn = want_tagging; - sja1105_frame_memory_partitioning(priv); + for (port = 0; port < ds->num_ports; port++) { + if (dsa_is_unused_port(ds, port)) + continue; - rc = sja1105_build_vlan_table(priv, false); - if (rc) - return rc; + rc = sja1105_commit_pvid(ds, port); + if (rc) + return rc; + } rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); if (rc) NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype"); - /* Switch port identification based on 802.1Q is only passable - * if we are not under a vlan_filtering bridge. So make sure - * the two configurations are mutually exclusive (of course, the - * user may know better, i.e. best_effort_vlan_filtering). - */ - return sja1105_setup_8021q_tagging(ds, want_tagging); + return rc; } -/* Returns number of VLANs added (0 or 1) on success, - * or a negative error code. - */ -static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid, - u16 flags, struct list_head *vlan_list) -{ - bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = flags & BRIDGE_VLAN_INFO_PVID; - struct sja1105_bridge_vlan *v; - - list_for_each_entry(v, vlan_list, list) { - if (v->port == port && v->vid == vid) { - /* Already added */ - if (v->untagged == untagged && v->pvid == pvid) - /* Nothing changed */ - return 0; - - /* It's the same VLAN, but some of the flags changed - * and the user did not bother to delete it first. - * Update it and trigger sja1105_build_vlan_table. 
- */ - v->untagged = untagged; - v->pvid = pvid; - return 1; - } - } +static int sja1105_vlan_add(struct sja1105_private *priv, int port, u16 vid, + u16 flags, bool allowed_ingress) +{ + struct sja1105_vlan_lookup_entry *vlan; + struct sja1105_table *table; + int match, rc; - v = kzalloc(sizeof(*v), GFP_KERNEL); - if (!v) { - dev_err(ds->dev, "Out of memory while storing VLAN\n"); - return -ENOMEM; + table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; + + match = sja1105_is_vlan_configured(priv, vid); + if (match < 0) { + rc = sja1105_table_resize(table, table->entry_count + 1); + if (rc) + return rc; + match = table->entry_count - 1; } - v->port = port; - v->vid = vid; - v->untagged = untagged; - v->pvid = pvid; - list_add(&v->list, vlan_list); + /* Assign pointer after the resize (it's new memory) */ + vlan = table->entries; + + vlan[match].type_entry = SJA1110_VLAN_D_TAG; + vlan[match].vlanid = vid; + vlan[match].vlan_bc |= BIT(port); + + if (allowed_ingress) + vlan[match].vmemb_port |= BIT(port); + else + vlan[match].vmemb_port &= ~BIT(port); + + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) + vlan[match].tag_port &= ~BIT(port); + else + vlan[match].tag_port |= BIT(port); - return 1; + return sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, + &vlan[match], true); } -/* Returns number of VLANs deleted (0 or 1) */ -static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid, - struct list_head *vlan_list) +static int sja1105_vlan_del(struct sja1105_private *priv, int port, u16 vid) { - struct sja1105_bridge_vlan *v, *n; + struct sja1105_vlan_lookup_entry *vlan; + struct sja1105_table *table; + bool keep = true; + int match, rc; - list_for_each_entry_safe(v, n, vlan_list, list) { - if (v->port == port && v->vid == vid) { - list_del(&v->list); - kfree(v); - return 1; - } - } + table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP]; + + match = sja1105_is_vlan_configured(priv, vid); + /* Can't delete a missing entry. */ + if (match < 0) + return 0; + + /* Assign pointer after the resize (it's new memory) */ + vlan = table->entries; + + vlan[match].vlanid = vid; + vlan[match].vlan_bc &= ~BIT(port); + vlan[match].vmemb_port &= ~BIT(port); + /* Also unset tag_port, just so we don't have a confusing bitmap + * (no practical purpose). + */ + vlan[match].tag_port &= ~BIT(port); + + /* If there's no port left as member of this VLAN, + * it's time for it to go. + */ + if (!vlan[match].vmemb_port) + keep = false; + + rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid, + &vlan[match], keep); + if (rc < 0) + return rc; + + if (!keep) + return sja1105_table_delete_entry(table, match); return 0; } -static int sja1105_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) +static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { struct sja1105_private *priv = ds->priv; - bool vlan_table_changed = false; + u16 flags = vlan->flags; int rc; - /* If the user wants best-effort VLAN filtering (aka vlan_filtering - * bridge plus tagging), be sure to at least deny alterations to the - * configuration done by dsa_8021q. + /* Be sure to deny alterations to the configuration done by tag_8021q. 
*/ - if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL && - vid_is_dsa_8021q(vlan->vid)) { + if (vid_is_dsa_8021q(vlan->vid)) { NL_SET_ERR_MSG_MOD(extack, - "Range 1024-3071 reserved for dsa_8021q operation"); + "Range 3072-4095 reserved for dsa_8021q operation"); return -EBUSY; } - rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags, - &priv->bridge_vlans); - if (rc < 0) + /* Always install bridge VLANs as egress-tagged on CPU and DSA ports */ + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) + flags = 0; + + rc = sja1105_vlan_add(priv, port, vlan->vid, flags, true); + if (rc) return rc; - if (rc > 0) - vlan_table_changed = true; - if (!vlan_table_changed) - return 0; + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + priv->bridge_pvid[port] = vlan->vid; - return sja1105_build_vlan_table(priv, true); + return sja1105_commit_pvid(ds, port); } -static int sja1105_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int sja1105_bridge_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { struct sja1105_private *priv = ds->priv; - bool vlan_table_changed = false; int rc; - rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans); - if (rc > 0) - vlan_table_changed = true; - - if (!vlan_table_changed) - return 0; + rc = sja1105_vlan_del(priv, port, vlan->vid); + if (rc) + return rc; - return sja1105_build_vlan_table(priv, true); + /* In case the pvid was deleted, make sure that untagged packets will + * be dropped. + */ + return sja1105_commit_pvid(ds, port); } static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, u16 flags) { struct sja1105_private *priv = ds->priv; + bool allowed_ingress = true; int rc; - rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans); - if (rc <= 0) + /* Prevent attackers from trying to inject a DSA tag from + * the outside world. + */ + if (dsa_is_user_port(ds, port)) + allowed_ingress = false; + + rc = sja1105_vlan_add(priv, port, vid, flags, allowed_ingress); + if (rc) return rc; - return sja1105_build_vlan_table(priv, true); + if (flags & BRIDGE_VLAN_INFO_PVID) + priv->tag_8021q_pvid[port] = vid; + + return sja1105_commit_pvid(ds, port); } static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { struct sja1105_private *priv = ds->priv; - int rc; - - rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans); - if (!rc) - return 0; - return sja1105_build_vlan_table(priv, true); + return sja1105_vlan_del(priv, port, vid); } -static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = { - .vlan_add = sja1105_dsa_8021q_vlan_add, - .vlan_del = sja1105_dsa_8021q_vlan_del, -}; - -/* The programming model for the SJA1105 switch is "all-at-once" via static - * configuration tables. Some of these can be dynamically modified at runtime, - * but not the xMII mode parameters table. - * Furthermode, some PHYs may not have crystals for generating their clocks - * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's - * ref_clk pin. So port clocking needs to be initialized early, before - * connecting to PHYs is attempted, otherwise they won't respond through MDIO. - * Setting correct PHY link speed does not matter now. - * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY - * bindings are not yet parsed by DSA core. We need to parse early so that we - * can populate the xMII mode parameters table. 
- */ -static int sja1105_setup(struct dsa_switch *ds) +static int sja1105_prechangeupper(struct dsa_switch *ds, int port, + struct netdev_notifier_changeupper_info *info) { - struct sja1105_private *priv = ds->priv; - int rc; - - rc = sja1105_parse_dt(priv); - if (rc < 0) { - dev_err(ds->dev, "Failed to parse DT: %d\n", rc); - return rc; - } - - /* Error out early if internal delays are required through DT - * and we can't apply them. - */ - rc = sja1105_parse_rgmii_delays(priv); - if (rc < 0) { - dev_err(ds->dev, "RGMII delay not supported\n"); - return rc; - } + struct netlink_ext_ack *extack = info->info.extack; + struct net_device *upper = info->upper_dev; + struct dsa_switch_tree *dst = ds->dst; + struct dsa_port *dp; - rc = sja1105_ptp_clock_register(ds); - if (rc < 0) { - dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); - return rc; - } - - rc = sja1105_mdiobus_register(ds); - if (rc < 0) { - dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", - ERR_PTR(rc)); - goto out_ptp_clock_unregister; - } - - if (priv->info->disable_microcontroller) { - rc = priv->info->disable_microcontroller(priv); - if (rc < 0) { - dev_err(ds->dev, - "Failed to disable microcontroller: %pe\n", - ERR_PTR(rc)); - goto out_mdiobus_unregister; - } + if (is_vlan_dev(upper)) { + NL_SET_ERR_MSG_MOD(extack, "8021q uppers are not supported"); + return -EBUSY; } - /* Create and send configuration down to device */ - rc = sja1105_static_config_load(priv); - if (rc < 0) { - dev_err(ds->dev, "Failed to load static config: %d\n", rc); - goto out_mdiobus_unregister; - } + if (netif_is_bridge_master(upper)) { + list_for_each_entry(dp, &dst->ports, list) { + struct net_device *br = dsa_port_bridge_dev_get(dp); - /* Configure the CGU (PHY link modes and speeds) */ - if (priv->info->clocking_setup) { - rc = priv->info->clocking_setup(priv); - if (rc < 0) { - dev_err(ds->dev, - "Failed to configure MII clocking: %pe\n", - ERR_PTR(rc)); - goto out_static_config_free; + if (br && br != upper && br_vlan_enabled(br)) { + NL_SET_ERR_MSG_MOD(extack, + "Only one VLAN-aware bridge is supported"); + return -EBUSY; + } } } - /* On SJA1105, VLAN filtering per se is always enabled in hardware. - * The only thing we can do to disable it is lie about what the 802.1Q - * EtherType is. - * So it will still try to apply VLAN filtering, but all ingress - * traffic (except frames received with EtherType of ETH_P_SJA1105) - * will be internally tagged with a distorted VLAN header where the - * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. - */ - ds->vlan_filtering_is_global = true; - - /* Advertise the 8 egress queues */ - ds->num_tx_queues = SJA1105_NUM_TC; - - ds->mtu_enforcement_ingress = true; - - priv->best_effort_vlan_filtering = true; - - rc = sja1105_devlink_setup(ds); - if (rc < 0) - goto out_static_config_free; - - /* The DSA/switchdev model brings up switch ports in standalone mode by - * default, and that means vlan_filtering is 0 since they're not under - * a bridge, so it's safe to set up switch tagging at this time. 
- */ - rtnl_lock(); - rc = sja1105_setup_8021q_tagging(ds, true); - rtnl_unlock(); - if (rc) - goto out_devlink_teardown; - return 0; - -out_devlink_teardown: - sja1105_devlink_teardown(ds); -out_mdiobus_unregister: - sja1105_mdiobus_unregister(ds); -out_ptp_clock_unregister: - sja1105_ptp_clock_unregister(ds); -out_static_config_free: - sja1105_static_config_free(&priv->static_config); - - return rc; -} - -static void sja1105_teardown(struct dsa_switch *ds) -{ - struct sja1105_private *priv = ds->priv; - struct sja1105_bridge_vlan *v, *n; - int port; - - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - continue; - - if (sp->xmit_worker) - kthread_destroy_worker(sp->xmit_worker); - } - - sja1105_devlink_teardown(ds); - sja1105_flower_teardown(ds); - sja1105_tas_teardown(ds); - sja1105_ptp_clock_unregister(ds); - sja1105_static_config_free(&priv->static_config); - - list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) { - list_del(&v->list); - kfree(v); - } - - list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) { - list_del(&v->list); - kfree(v); - } -} - -static void sja1105_port_disable(struct dsa_switch *ds, int port) -{ - struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - return; - - kthread_cancel_work_sync(&sp->xmit_work); - skb_queue_purge(&sp->xmit_queue); } static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, @@ -3188,7 +2681,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, } /* Transfer skb to the host port. */ - dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave); + dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user); /* Wait until the switch has processed the frame */ do { @@ -3221,10 +2714,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, return NETDEV_TX_OK; } -#define work_to_port(work) \ - container_of((work), struct sja1105_port, xmit_work) -#define tagger_to_sja1105(t) \ - container_of((t), struct sja1105_private, tagger_data) +#define work_to_xmit_work(w) \ + container_of((w), struct sja1105_deferred_xmit_work, work) /* Deferred work is unfortunately necessary because setting up the management * route cannot be done from atomit context (SPI transfer takes a sleepable @@ -3232,25 +2723,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, */ static void sja1105_port_deferred_xmit(struct kthread_work *work) { - struct sja1105_port *sp = work_to_port(work); - struct sja1105_tagger_data *tagger_data = sp->data; - struct sja1105_private *priv = tagger_to_sja1105(tagger_data); - int port = sp - priv->ports; - struct sk_buff *skb; + struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work); + struct sk_buff *clone, *skb = xmit_work->skb; + struct dsa_switch *ds = xmit_work->dp->ds; + struct sja1105_private *priv = ds->priv; + int port = xmit_work->dp->index; - while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) { - struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; + clone = SJA1105_SKB_CB(skb)->clone; - mutex_lock(&priv->mgmt_lock); + mutex_lock(&priv->mgmt_lock); - sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone); + sja1105_mgmt_xmit(ds, port, 0, skb, !!clone); - /* The clone, if there, was made by dsa_skb_tx_timestamp */ - if (clone) - sja1105_ptp_txtstamp_skb(priv->ds, port, clone); + /* The clone, if there, was made by dsa_skb_tx_timestamp */ + if (clone) + sja1105_ptp_txtstamp_skb(ds, 
port, clone); - mutex_unlock(&priv->mgmt_lock); - } + mutex_unlock(&priv->mgmt_lock); + + kfree(xmit_work); +} + +static int sja1105_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *tagger_data; + + if (proto != priv->info->tag_proto) + return -EPROTONOSUPPORT; + + tagger_data = sja1105_tagger_data(ds); + tagger_data->xmit_work_fn = sja1105_port_deferred_xmit; + tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp; + + return 0; } /* The MAXAGE setting belongs to the L2 Forwarding Parameters table, @@ -3284,7 +2791,7 @@ static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu) new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN; - if (dsa_is_cpu_port(ds, port)) + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) new_mtu += VLAN_HLEN; policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries; @@ -3382,7 +2889,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to, static int sja1105_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, - bool ingress) + bool ingress, struct netlink_ext_ack *extack) { return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, ingress, true); @@ -3431,23 +2938,13 @@ static int sja1105_port_set_learning(struct sja1105_private *priv, int port, bool enabled) { struct sja1105_mac_config_entry *mac; - int rc; mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; mac[port].dyn_learn = enabled; - rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, - &mac[port], true); - if (rc) - return rc; - - if (enabled) - priv->learn_ena |= BIT(port); - else - priv->learn_ena &= ~BIT(port); - - return 0; + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port, + &mac[port], true); } static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to, @@ -3476,7 +2973,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, { struct sja1105_l2_lookup_entry *l2_lookup; struct sja1105_table *table; - int match; + int match, rc; + + mutex_lock(&priv->fdb_lock); table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP]; l2_lookup = table->entries; @@ -3489,7 +2988,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, if (match == table->entry_count) { NL_SET_ERR_MSG_MOD(extack, "Could not find FDB entry for unknown multicast"); - return -ENOSPC; + rc = -ENOSPC; + goto out; } if (flags.val & BR_MCAST_FLOOD) @@ -3497,10 +2997,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to, else l2_lookup[match].destports &= ~BIT(to); - return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, - l2_lookup[match].index, - &l2_lookup[match], - true); + rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, + l2_lookup[match].index, + &l2_lookup[match], true); +out: + mutex_unlock(&priv->fdb_lock); + + return rc; } static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port, @@ -3563,33 +3066,160 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port, return 0; } +/* The programming model for the SJA1105 switch is "all-at-once" via static + * configuration tables. Some of these can be dynamically modified at runtime, + * but not the xMII mode parameters table. + * Furthermode, some PHYs may not have crystals for generating their clocks + * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's + * ref_clk pin. 
So port clocking needs to be initialized early, before + * connecting to PHYs is attempted, otherwise they won't respond through MDIO. + * Setting correct PHY link speed does not matter now. + * But dsa_user_phy_setup is called later than sja1105_setup, so the PHY + * bindings are not yet parsed by DSA core. We need to parse early so that we + * can populate the xMII mode parameters table. + */ +static int sja1105_setup(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + int rc; + + if (priv->info->disable_microcontroller) { + rc = priv->info->disable_microcontroller(priv); + if (rc < 0) { + dev_err(ds->dev, + "Failed to disable microcontroller: %pe\n", + ERR_PTR(rc)); + return rc; + } + } + + /* Create and send configuration down to device */ + rc = sja1105_static_config_load(priv); + if (rc < 0) { + dev_err(ds->dev, "Failed to load static config: %d\n", rc); + return rc; + } + + /* Configure the CGU (PHY link modes and speeds) */ + if (priv->info->clocking_setup) { + rc = priv->info->clocking_setup(priv); + if (rc < 0) { + dev_err(ds->dev, + "Failed to configure MII clocking: %pe\n", + ERR_PTR(rc)); + goto out_static_config_free; + } + } + + sja1105_tas_setup(ds); + sja1105_flower_setup(ds); + + rc = sja1105_ptp_clock_register(ds); + if (rc < 0) { + dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); + goto out_flower_teardown; + } + + rc = sja1105_mdiobus_register(ds); + if (rc < 0) { + dev_err(ds->dev, "Failed to register MDIO bus: %pe\n", + ERR_PTR(rc)); + goto out_ptp_clock_unregister; + } + + rc = sja1105_devlink_setup(ds); + if (rc < 0) + goto out_mdiobus_unregister; + + rtnl_lock(); + rc = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q)); + rtnl_unlock(); + if (rc) + goto out_devlink_teardown; + + /* On SJA1105, VLAN filtering per se is always enabled in hardware. + * The only thing we can do to disable it is lie about what the 802.1Q + * EtherType is. + * So it will still try to apply VLAN filtering, but all ingress + * traffic (except frames received with EtherType of ETH_P_SJA1105) + * will be internally tagged with a distorted VLAN header where the + * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid. 
+ */ + ds->vlan_filtering_is_global = true; + ds->fdb_isolation = true; + ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; + + /* Advertise the 8 egress queues */ + ds->num_tx_queues = SJA1105_NUM_TC; + + ds->mtu_enforcement_ingress = true; + ds->assisted_learning_on_cpu_port = true; + + return 0; + +out_devlink_teardown: + sja1105_devlink_teardown(ds); +out_mdiobus_unregister: + sja1105_mdiobus_unregister(ds); +out_ptp_clock_unregister: + sja1105_ptp_clock_unregister(ds); +out_flower_teardown: + sja1105_flower_teardown(ds); + sja1105_tas_teardown(ds); +out_static_config_free: + sja1105_static_config_free(&priv->static_config); + + return rc; +} + +static void sja1105_teardown(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + + rtnl_lock(); + dsa_tag_8021q_unregister(ds); + rtnl_unlock(); + + sja1105_devlink_teardown(ds); + sja1105_mdiobus_unregister(ds); + sja1105_ptp_clock_unregister(ds); + sja1105_flower_teardown(ds); + sja1105_tas_teardown(ds); + sja1105_static_config_free(&priv->static_config); +} + +static const struct phylink_mac_ops sja1105_phylink_mac_ops = { + .mac_select_pcs = sja1105_mac_select_pcs, + .mac_config = sja1105_mac_config, + .mac_link_up = sja1105_mac_link_up, + .mac_link_down = sja1105_mac_link_down, +}; + static const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, + .connect_tag_protocol = sja1105_connect_tag_protocol, .setup = sja1105_setup, .teardown = sja1105_teardown, .set_ageing_time = sja1105_set_ageing_time, .port_change_mtu = sja1105_change_mtu, .port_max_mtu = sja1105_get_max_mtu, - .phylink_validate = sja1105_phylink_validate, - .phylink_mac_config = sja1105_mac_config, - .phylink_mac_link_up = sja1105_mac_link_up, - .phylink_mac_link_down = sja1105_mac_link_down, + .phylink_get_caps = sja1105_phylink_get_caps, .get_strings = sja1105_get_strings, .get_ethtool_stats = sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, .get_ts_info = sja1105_get_ts_info, - .port_disable = sja1105_port_disable, .port_fdb_dump = sja1105_fdb_dump, .port_fdb_add = sja1105_fdb_add, .port_fdb_del = sja1105_fdb_del, + .port_fast_age = sja1105_fast_age, .port_bridge_join = sja1105_bridge_join, .port_bridge_leave = sja1105_bridge_leave, .port_pre_bridge_flags = sja1105_port_pre_bridge_flags, .port_bridge_flags = sja1105_port_bridge_flags, .port_stp_state_set = sja1105_bridge_stp_state_set, .port_vlan_filtering = sja1105_vlan_filtering, - .port_vlan_add = sja1105_vlan_add, - .port_vlan_del = sja1105_vlan_del, + .port_vlan_add = sja1105_bridge_vlan_add, + .port_vlan_del = sja1105_bridge_vlan_del, .port_mdb_add = sja1105_mdb_add, .port_mdb_del = sja1105_mdb_del, .port_hwtstamp_get = sja1105_hwtstamp_get, @@ -3604,11 +3234,10 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .cls_flower_add = sja1105_cls_flower_add, .cls_flower_del = sja1105_cls_flower_del, .cls_flower_stats = sja1105_cls_flower_stats, - .crosschip_bridge_join = sja1105_crosschip_bridge_join, - .crosschip_bridge_leave = sja1105_crosschip_bridge_leave, - .devlink_param_get = sja1105_devlink_param_get, - .devlink_param_set = sja1105_devlink_param_set, .devlink_info_get = sja1105_devlink_info_get, + .tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add, + .tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del, + .port_prechangeupper = sja1105_prechangeupper, }; static const struct of_device_id sja1105_dt_ids[]; @@ -3662,29 +3291,25 @@ static int sja1105_check_device_id(struct sja1105_private *priv) static int sja1105_probe(struct 
spi_device *spi) { - struct sja1105_tagger_data *tagger_data; struct device *dev = &spi->dev; struct sja1105_private *priv; size_t max_xfer, max_msg; struct dsa_switch *ds; - int rc, port; + int rc; if (!dev->of_node) { dev_err(dev, "No DTS bindings for SJA1105 driver\n"); return -EINVAL; } + rc = sja1105_hw_reset(dev, 1, 1); + if (rc) + return rc; + priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); if (!priv) return -ENOMEM; - /* Configure the optional reset pin and bring up switch */ - priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) - dev_dbg(dev, "reset-gpios not defined, ignoring\n"); - else - sja1105_hw_reset(priv->reset_gpio, 1, 1); - /* Populate our driver private structure (priv) based on * the device tree node that was probed (spi) */ @@ -3744,99 +3369,53 @@ static int sja1105_probe(struct spi_device *spi) ds->dev = dev; ds->num_ports = priv->info->num_ports; ds->ops = &sja1105_switch_ops; + ds->phylink_mac_ops = &sja1105_phylink_mac_ops; ds->priv = priv; priv->ds = ds; - tagger_data = &priv->tagger_data; - mutex_init(&priv->ptp_data.lock); + mutex_init(&priv->dynamic_config_lock); mutex_init(&priv->mgmt_lock); + mutex_init(&priv->fdb_lock); + spin_lock_init(&priv->ts_id_lock); - priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx), - GFP_KERNEL); - if (!priv->dsa_8021q_ctx) - return -ENOMEM; - - priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops; - priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q); - priv->dsa_8021q_ctx->ds = ds; - - INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links); - INIT_LIST_HEAD(&priv->bridge_vlans); - INIT_LIST_HEAD(&priv->dsa_8021q_vlans); - - sja1105_tas_setup(ds); - sja1105_flower_setup(ds); - - rc = dsa_register_switch(priv->ds); - if (rc) + rc = sja1105_parse_dt(priv); + if (rc < 0) { + dev_err(ds->dev, "Failed to parse DT: %d\n", rc); return rc; + } if (IS_ENABLED(CONFIG_NET_SCH_CBS)) { priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers, sizeof(struct sja1105_cbs_entry), GFP_KERNEL); - if (!priv->cbs) { - rc = -ENOMEM; - goto out_unregister_switch; - } - } - - /* Connections between dsa_port and sja1105_port */ - for (port = 0; port < ds->num_ports; port++) { - struct sja1105_port *sp = &priv->ports[port]; - struct dsa_port *dp = dsa_to_port(ds, port); - struct net_device *slave; - int subvlan; - - if (!dsa_is_user_port(ds, port)) - continue; - - dp->priv = sp; - sp->dp = dp; - sp->data = tagger_data; - slave = dp->slave; - kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); - sp->xmit_worker = kthread_create_worker(0, "%s_xmit", - slave->name); - if (IS_ERR(sp->xmit_worker)) { - rc = PTR_ERR(sp->xmit_worker); - dev_err(ds->dev, - "failed to create deferred xmit thread: %d\n", - rc); - goto out_destroy_workers; - } - skb_queue_head_init(&sp->xmit_queue); - sp->xmit_tpid = ETH_P_SJA1105; - - for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) - sp->subvlan_map[subvlan] = VLAN_N_VID; + if (!priv->cbs) + return -ENOMEM; } - return 0; - -out_destroy_workers: - while (port-- > 0) { - struct sja1105_port *sp = &priv->ports[port]; - - if (!dsa_is_user_port(ds, port)) - continue; + return dsa_register_switch(priv->ds); +} - kthread_destroy_worker(sp->xmit_worker); - } +static void sja1105_remove(struct spi_device *spi) +{ + struct sja1105_private *priv = spi_get_drvdata(spi); -out_unregister_switch: - dsa_unregister_switch(ds); + if (!priv) + return; - return rc; + dsa_unregister_switch(priv->ds); } -static int sja1105_remove(struct spi_device *spi) 
+static void sja1105_shutdown(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);

- dsa_unregister_switch(priv->ds);
- return 0;
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
}

static const struct of_device_id sja1105_dt_ids[] = {
@@ -3854,14 +3433,30 @@ static const struct of_device_id sja1105_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);

+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
+ .id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
+ .shutdown = sja1105_shutdown,
};

module_spi_driver(sja1105_driver);
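For context on the deferred-transmit rework in the hunks above: the switch driver no longer owns per-port xmit queues and kthread workers. Instead, sja1105_connect_tag_protocol() publishes sja1105_port_deferred_xmit to the tagging protocol driver via sja1105_tagger_data, and the tagger is expected to wrap each management frame in a struct sja1105_deferred_xmit_work and queue it on a kthread worker. The sketch below shows that hand-off under stated assumptions: the worker argument, the allocation flags and the error handling are illustrative, not a copy of the in-tree net/dsa/tag_sja1105.c code.

/* Illustrative tagger-side helper: hand one management frame to the
 * switch driver's deferred-xmit handler. 'worker' is an assumed
 * kthread_worker owned by the tagger; the in-tree tagger keeps extra
 * per-port state that is omitted here.
 */
#include <linux/dsa/sja1105.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dsa.h>

static struct sk_buff *example_defer_xmit(struct kthread_worker *worker,
					  struct sja1105_tagger_data *tagger_data,
					  struct dsa_port *dp,
					  struct sk_buff *skb)
{
	struct sja1105_deferred_xmit_work *xmit_work;

	/* One work item per frame; the switch driver frees it at the end
	 * of sja1105_port_deferred_xmit().
	 */
	xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
	if (!xmit_work)
		return NULL; /* caller is assumed to free the skb */

	/* xmit_work_fn was set to sja1105_port_deferred_xmit by
	 * sja1105_connect_tag_protocol().
	 */
	kthread_init_work(&xmit_work->work, tagger_data->xmit_work_fn);
	xmit_work->dp = dp;
	xmit_work->skb = skb;

	kthread_queue_work(worker, &xmit_work->work);

	/* The frame is now owned by the deferred path. */
	return NULL;
}

On the driver side, sja1105_port_deferred_xmit() then takes mgmt_lock, transfers the frame with sja1105_mgmt_xmit() and frees the work item, as shown earlier in this diff.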