Diffstat (limited to 'drivers/net/dsa/ocelot/felix.c')
 -rw-r--r--   drivers/net/dsa/ocelot/felix.c   1945
 1 file changed, 1352 insertions, 593 deletions
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index a2a15919b960..9e5ede932b42 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright 2019-2021 NXP Semiconductors +/* Copyright 2019-2021 NXP * * This is an umbrella module for all network switches that are * register-compatible with Ocelot and that perform I/O to their host CPU @@ -21,37 +21,104 @@ #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> -#include <linux/pcs-lynx.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include "felix.h" -static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +/* Translate the DSA database API into the ocelot switch library API, + * which uses VID 0 for all ports that aren't part of a bridge, + * and expects the bridge_dev to be NULL in that case. + */ +static struct net_device *felix_classify_db(struct dsa_db db) { - struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int key_length, upstream, err; + switch (db.type) { + case DSA_DB_PORT: + case DSA_DB_LAG: + return NULL; + case DSA_DB_BRIDGE: + return db.bridge.dev; + default: + return ERR_PTR(-EOPNOTSUPP); + } +} - /* We don't need to install the rxvlan into the other ports' filtering - * tables, because we're just pushing the rxvlan when sending towards - * the CPU - */ - if (!pvid) - return 0; +static int felix_cpu_port_for_conduit(struct dsa_switch *ds, + struct net_device *conduit) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; + int lag; + + if (netif_is_lag_master(conduit)) { + mutex_lock(&ocelot->fwd_domain_lock); + lag = ocelot_bond_get_id(ocelot, conduit); + mutex_unlock(&ocelot->fwd_domain_lock); + + return lag; + } + + cpu_dp = conduit->dsa_ptr; + return cpu_dp->index; +} + +/** + * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after + * vlan_filtering change + * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed + * @vlan_filtering: Current bridge VLAN filtering setting + * + * Source port identification for tag_8021q is done using VCAP ES0 rules on the + * CPU port(s). The ES0 tag B (inner tag from the packet) can be configured as + * either: + * - push_inner_tag=0: the inner tag is never pushed into the frame + * (and we lose info about the classified VLAN). This is + * good when the classified VLAN is a discardable quantity + * for the software RX path: it is either set to + * OCELOT_STANDALONE_PVID, or to + * ocelot_vlan_unaware_pvid(bridge). + * - push_inner_tag=1: the inner tag is always pushed. This is good when the + * classified VLAN is not a discardable quantity (the port + * is under a VLAN-aware bridge, and software needs to + * continue processing the packet in the same VLAN as the + * hardware). + * The point is that what is good for a VLAN-unaware port is not good for a + * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in + * sync with the VLAN filtering state of the port. 
+ */ +static void +felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule, + bool vlan_filtering) +{ + if (vlan_filtering) + outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG; + else + outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG; +} + +/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that + * the tagger can perform RX source port identification. + */ +static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid, + bool vlan_filtering) +{ + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int key_length, err; key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; - upstream = dsa_upstream_port(ds, port); outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!outer_tagging_rule) return -ENOMEM; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; outer_tagging_rule->prio = 1; - outer_tagging_rule->id.cookie = port; + outer_tagging_rule->id.cookie = cookie; outer_tagging_rule->id.tc_offload = false; outer_tagging_rule->block_id = VCAP_ES0; outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -64,6 +131,14 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD; outer_tagging_rule->action.tag_a_vid_sel = 1; outer_tagging_rule->action.vid_a_val = vid; + felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering); + outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q; + /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also + * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to + * the classified VID, which we need to see in the DSA tagger's receive + * path. Note: the inner tag is only visible in the packet when pushed + * (push_inner_tag == OCELOT_ES0_TAG). + */ err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL); if (err) @@ -72,20 +147,36 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid, return err; } -static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, - bool pvid, bool untagged) +static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) { - struct ocelot_vcap_filter *untagging_rule, *redirect_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int upstream, err; + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot_vcap_block *block_vcap_es0; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; - /* tag_8021q.c assumes we are implementing this via port VLAN - * membership, which we aren't. So we don't need to add any VCAP filter - * for the CPU port. 
- */ - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; + block_vcap_es0 = &ocelot->block[VCAP_ES0]; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + + outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, + cookie, false); + if (!outer_tagging_rule) + return -ENOENT; + + return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); +} + +/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 + * rules for steering those tagged packets towards the correct destination port + */ +static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port, + u16 vid) +{ + struct ocelot_vcap_filter *untagging_rule, *redirect_rule; + unsigned long cpu_ports = dsa_cpu_ports(ds); + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int err; untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) @@ -97,14 +188,14 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return -ENOMEM; } - upstream = dsa_upstream_port(ds, port); + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule->key_type = OCELOT_VCAP_KEY_ANY; - untagging_rule->ingress_port_mask = BIT(upstream); + untagging_rule->ingress_port_mask = cpu_ports; untagging_rule->vlan.vid.value = vid; untagging_rule->vlan.vid.mask = VLAN_VID_MASK; untagging_rule->prio = 1; - untagging_rule->id.cookie = port; + untagging_rule->id.cookie = cookie; untagging_rule->id.tc_offload = false; untagging_rule->block_id = VCAP_IS1; untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -121,11 +212,13 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return err; } + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); + redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; - redirect_rule->ingress_port_mask = BIT(upstream); + redirect_rule->ingress_port_mask = cpu_ports; redirect_rule->pag = port; redirect_rule->prio = 1; - redirect_rule->id.cookie = port; + redirect_rule->id.cookie = cookie; redirect_rule->id.tc_offload = false; redirect_rule->block_id = VCAP_IS2; redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -143,352 +236,213 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid, return 0; } -static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, - u16 flags) -{ - bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = flags & BRIDGE_VLAN_INFO_PVID; - struct ocelot *ocelot = ds->priv; - - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot), - port, vid, pvid, untagged); - - return 0; -} - -static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid) -{ - struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot_vcap_block *block_vcap_es0; - struct ocelot *ocelot = &felix->ocelot; - - block_vcap_es0 = &ocelot->block[VCAP_ES0]; - - outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, - port, false); - /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid - * installing outer tagging ES0 rules where they weren't needed. - * But in rxvlan_del, the API doesn't give us the "flags" anymore, - * so that forces us to be slightly sloppy here, and just assume that - * if we didn't find an outer_tagging_rule it means that there was - * none in the first place, i.e. rxvlan_del is called on a non-pvid - * port. 
This is most probably true though. - */ - if (!outer_tagging_rule) - return 0; - - return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); -} - -static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot_vcap_block *block_vcap_is1; struct ocelot_vcap_block *block_vcap_is2; - struct ocelot *ocelot = &felix->ocelot; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; int err; - if (ocelot->ports[port]->is_dsa_8021q_cpu) - return 0; - block_vcap_is1 = &ocelot->block[VCAP_IS1]; block_vcap_is2 = &ocelot->block[VCAP_IS2]; + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, - port, false); + cookie, false); if (!untagging_rule) - return 0; + return -ENOENT; err = ocelot_vcap_filter_del(ocelot, untagging_rule); if (err) return err; + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, - port, false); + cookie, false); if (!redirect_rule) - return 0; + return -ENOENT; return ocelot_vcap_filter_del(ocelot, redirect_rule); } -static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) +static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, + u16 flags) { - struct ocelot *ocelot = ds->priv; - - if (vid_is_dsa_8021q_rxvlan(vid)) - return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot), - port, vid); - - if (vid_is_dsa_8021q_txvlan(vid)) - return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot), - port, vid); - - return 0; -} - -static const struct dsa_8021q_ops felix_tag_8021q_ops = { - .vlan_add = felix_tag_8021q_vlan_add, - .vlan_del = felix_tag_8021q_vlan_del, -}; + struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_port *cpu_dp; + int err; -/* Alternatively to using the NPI functionality, that same hardware MAC - * connected internally to the enetc or fman DSA master can be configured to - * use the software-defined tag_8021q frame format. As far as the hardware is - * concerned, it thinks it is a "dumb switch" - the queues of the CPU port - * module are now disconnected from it, but can still be accessed through - * register-based MMIO. - */ -static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) -{ - ocelot->ports[port]->is_dsa_8021q_cpu = true; - ocelot->npi = -1; + /* tag_8021q.c assumes we are implementing this via port VLAN + * membership, which we aren't. So we don't need to add any VCAP filter + * for the CPU port. 
+ */ + if (!dsa_port_is_user(dp)) + return 0; - /* Overwrite PGID_CPU with the non-tagging port */ - ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU); + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid, + dsa_port_is_vlan_filtering(dp)); + if (err) + return err; + } - ocelot_apply_bridge_fwd_mask(ocelot); -} + err = felix_tag_8021q_vlan_add_tx(ds, port, vid); + if (err) + goto add_tx_failed; -static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) -{ - ocelot->ports[port]->is_dsa_8021q_cpu = false; + return 0; - /* Restore PGID_CPU */ - ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID, - PGID_CPU); +add_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); - ocelot_apply_bridge_fwd_mask(ocelot); + return err; } -/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module. - * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the - * tag_8021q CPU port. - */ -static int felix_setup_mmio_filtering(struct felix *felix) +static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { - unsigned long user_ports = 0, cpu_ports = 0; - struct ocelot_vcap_filter *redirect_rule; - struct ocelot_vcap_filter *tagging_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int port, ret; - - tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); - if (!tagging_rule) - return -ENOMEM; - - redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); - if (!redirect_rule) { - kfree(tagging_rule); - return -ENOMEM; - } + struct dsa_port *dp = dsa_to_port(ds, port); + struct dsa_port *cpu_dp; + int err; - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_user_port(ds, port)) - user_ports |= BIT(port); - if (dsa_is_cpu_port(ds, port)) - cpu_ports |= BIT(port); - } + if (!dsa_port_is_user(dp)) + return 0; - tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; - *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); - *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff); - tagging_rule->ingress_port_mask = user_ports; - tagging_rule->prio = 1; - tagging_rule->id.cookie = ocelot->num_phys_ports; - tagging_rule->id.tc_offload = false; - tagging_rule->block_id = VCAP_IS1; - tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; - tagging_rule->lookup = 0; - tagging_rule->action.pag_override_mask = 0xff; - tagging_rule->action.pag_val = ocelot->num_phys_ports; - - ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL); - if (ret) { - kfree(tagging_rule); - kfree(redirect_rule); - return ret; + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; } - redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; - redirect_rule->ingress_port_mask = user_ports; - redirect_rule->pag = ocelot->num_phys_ports; - redirect_rule->prio = 1; - redirect_rule->id.cookie = ocelot->num_phys_ports; - redirect_rule->id.tc_offload = false; - redirect_rule->block_id = VCAP_IS2; - redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; - redirect_rule->lookup = 0; - redirect_rule->action.cpu_copy_ena = true; - if (felix->info->quirk_no_xtr_irq) { - /* Redirect to the tag_8021q CPU but also copy PTP packets to - * the CPU port module - */ - redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; - redirect_rule->action.port_mask = cpu_ports; - } else { - /* Trap 
PTP packets only to the CPU port module (which is - * redirected to the NPI port) - */ - redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; - redirect_rule->action.port_mask = 0; - } + err = felix_tag_8021q_vlan_del_tx(ds, port, vid); + if (err) + goto del_tx_failed; - ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL); - if (ret) { - ocelot_vcap_filter_del(ocelot, tagging_rule); - kfree(redirect_rule); - return ret; - } + return 0; - /* The ownership of the CPU port module's queues might have just been - * transferred to the tag_8021q tagger from the NPI-based tagger. - * So there might still be all sorts of crap in the queues. On the - * other hand, the MMIO-based matching of PTP frames is very brittle, - * so we need to be careful that there are no extra frames to be - * dequeued over MMIO, since we would never know to discard them. - */ - ocelot_drain_cpu_queue(ocelot, 0); +del_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid, + dsa_port_is_vlan_filtering(dp)); - return 0; + return err; } -static int felix_teardown_mmio_filtering(struct felix *felix) +static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port, + bool vlan_filtering) { - struct ocelot_vcap_filter *tagging_rule, *redirect_rule; - struct ocelot_vcap_block *block_vcap_is1; - struct ocelot_vcap_block *block_vcap_is2; - struct ocelot *ocelot = &felix->ocelot; + struct ocelot_vcap_filter *outer_tagging_rule; + struct ocelot_vcap_block *block_vcap_es0; + struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; + unsigned long cookie; int err; - block_vcap_is1 = &ocelot->block[VCAP_IS1]; - block_vcap_is2 = &ocelot->block[VCAP_IS2]; - - tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, - ocelot->num_phys_ports, - false); - if (!tagging_rule) - return -ENOENT; - - err = ocelot_vcap_filter_del(ocelot, tagging_rule); - if (err) - return err; - - redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, - ocelot->num_phys_ports, - false); - if (!redirect_rule) - return -ENOENT; - - return ocelot_vcap_filter_del(ocelot, redirect_rule); -} + block_vcap_es0 = &ocelot->block[VCAP_ES0]; -static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) -{ - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - unsigned long cpu_flood; - int port, err; + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, + cpu_dp->index); - felix_8021q_cpu_port_init(ocelot, cpu); + outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, + cookie, false); - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; + felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering); - /* This overwrites ocelot_init(): - * Do not forward BPDU frames to the CPU port module, - * for 2 reasons: - * - When these packets are injected from the tag_8021q - * CPU port, we want them to go out, not loop back - * into the system. - * - STP traffic ingressing on a user port should go to - * the tag_8021q CPU port, not to the hardware CPU - * port module. - */ - ocelot_write_gix(ocelot, - ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), - ANA_PORT_CPU_FWD_BPDU_CFG, port); + err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule); + if (err) + return err; } - /* In tag_8021q mode, the CPU port module is unused, except for PTP - * frames. 
So we want to disable flooding of any kind to the CPU port - * module, since packets going there will end in a black hole. - */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC); - - felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx), - GFP_KERNEL); - if (!felix->dsa_8021q_ctx) - return -ENOMEM; - - felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops; - felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD); - felix->dsa_8021q_ctx->ds = ds; + return 0; +} - err = dsa_8021q_setup(felix->dsa_8021q_ctx, true); - if (err) - goto out_free_dsa_8021_ctx; +static int felix_trap_get_cpu_port(struct dsa_switch *ds, + const struct ocelot_vcap_filter *trap) +{ + struct dsa_port *dp; + int first_port; - err = felix_setup_mmio_filtering(felix); - if (err) - goto out_teardown_dsa_8021q; + if (WARN_ON(!trap->ingress_port_mask)) + return -1; - return 0; + first_port = __ffs(trap->ingress_port_mask); + dp = dsa_to_port(ds, first_port); -out_teardown_dsa_8021q: - dsa_8021q_setup(felix->dsa_8021q_ctx, false); -out_free_dsa_8021_ctx: - kfree(felix->dsa_8021q_ctx); - return err; + return dp->cpu_dp->index; } -static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu) +/* On switches with no extraction IRQ wired, trapped packets need to be + * replicated over Ethernet as well, otherwise we'd get no notification of + * their arrival when using the ocelot-8021q tagging protocol. + */ +static int felix_update_trapping_destinations(struct dsa_switch *ds, + bool using_tag_8021q) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int err, port; + struct ocelot_vcap_block *block_vcap_is2; + struct ocelot_vcap_filter *trap; + enum ocelot_mask_mode mask_mode; + unsigned long port_mask; + bool cpu_copy_ena; + int err; - err = felix_teardown_mmio_filtering(felix); - if (err) - dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d", - err); + if (!felix->info->quirk_no_xtr_irq) + return 0; - err = dsa_8021q_setup(felix->dsa_8021q_ctx, false); - if (err) - dev_err(ds->dev, "dsa_8021q_setup returned %d", err); + /* We are sure that "cpu" was found, otherwise + * dsa_tree_setup_default_cpu() would have failed earlier. + */ + block_vcap_is2 = &ocelot->block[VCAP_IS2]; + + /* Make sure all traps are set up for that destination */ + list_for_each_entry(trap, &block_vcap_is2->rules, list) { + if (!trap->is_trap) + continue; - kfree(felix->dsa_8021q_ctx); + /* Figure out the current trapping destination */ + if (using_tag_8021q) { + /* Redirect to the tag_8021q CPU port. If timestamps + * are necessary, also copy trapped packets to the CPU + * port module. + */ + mask_mode = OCELOT_MASK_MODE_REDIRECT; + port_mask = BIT(felix_trap_get_cpu_port(ds, trap)); + cpu_copy_ena = !!trap->take_ts; + } else { + /* Trap packets only to the CPU port module, which is + * redirected to the NPI port (the DSA CPU port) + */ + mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; + port_mask = 0; + cpu_copy_ena = true; + } - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) + if (trap->action.mask_mode == mask_mode && + trap->action.port_mask == port_mask && + trap->action.cpu_copy_ena == cpu_copy_ena) continue; - /* Restore the logic from ocelot_init: - * do not forward BPDU frames to the front ports. 
- */ - ocelot_write_gix(ocelot, - ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), - ANA_PORT_CPU_FWD_BPDU_CFG, - port); + trap->action.mask_mode = mask_mode; + trap->action.port_mask = port_mask; + trap->action.cpu_copy_ena = cpu_copy_ena; + + err = ocelot_vcap_filter_replace(ocelot, trap); + if (err) + return err; } - felix_8021q_cpu_port_deinit(ocelot, cpu); + return 0; } /* The CPU port module is connected to the Node Processor Interface (NPI). This * is the mode through which frames can be injected from and extracted to an * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU * running Linux, and this forms a DSA setup together with the enetc or fman - * DSA master. + * DSA conduit. */ static void felix_npi_port_init(struct ocelot *ocelot, int port) { @@ -527,103 +481,322 @@ static void felix_npi_port_deinit(struct ocelot *ocelot, int port) ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); } -static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu) +static int felix_tag_npi_setup(struct dsa_switch *ds) { + struct dsa_port *dp, *first_cpu_dp = NULL; struct ocelot *ocelot = ds->priv; - unsigned long cpu_flood; - felix_npi_port_init(ocelot, cpu); + dsa_switch_for_each_user_port(dp, ds) { + if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { + dev_err(ds->dev, "Multiple NPI ports not supported\n"); + return -EINVAL; + } - /* Include the CPU port module (and indirectly, the NPI port) - * in the forwarding mask for unknown unicast - the hardware - * default value for ANA_FLOODING_FLD_UNICAST excludes - * BIT(ocelot->num_phys_ports), and so does ocelot_init, - * since Ocelot relies on whitelisting MAC addresses towards - * PGID_CPU. - * We do this because DSA does not yet perform RX filtering, - * and the NPI port does not perform source address learning, - * so traffic sent to Linux is effectively unknown from the - * switch's perspective. 
- */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC); + first_cpu_dp = dp->cpu_dp; + } + + if (!first_cpu_dp) + return -EINVAL; + + felix_npi_port_init(ocelot, first_cpu_dp->index); return 0; } -static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu) +static void felix_tag_npi_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - felix_npi_port_deinit(ocelot, cpu); + felix_npi_port_deinit(ocelot, ocelot->npi); } -static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu, - enum dsa_tag_protocol proto) +static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) { - int err; + struct ocelot *ocelot = ds->priv; - switch (proto) { - case DSA_TAG_PROTO_SEVILLE: - case DSA_TAG_PROTO_OCELOT: - err = felix_setup_tag_npi(ds, cpu); - break; - case DSA_TAG_PROTO_OCELOT_8021Q: - err = felix_setup_tag_8021q(ds, cpu); - break; - default: - err = -EPROTONOSUPPORT; + return BIT(ocelot->num_phys_ports); +} + +static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; + struct ocelot *ocelot = ds->priv; + + if (netif_is_lag_master(conduit)) { + NL_SET_ERR_MSG_MOD(extack, + "LAG DSA conduit only supported using ocelot-8021q"); + return -EOPNOTSUPP; } - return err; + /* Changing the NPI port breaks user ports still assigned to the old + * one, so only allow it while they're down, and don't allow them to + * come back up until they're all changed to the new one. + */ + dsa_switch_for_each_user_port(other_dp, ds) { + struct net_device *user = other_dp->user; + + if (other_dp != dp && (user->flags & IFF_UP) && + dsa_port_to_conduit(other_dp) != conduit) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot change while old conduit still has users"); + return -EOPNOTSUPP; + } + } + + felix_npi_port_deinit(ocelot, ocelot->npi); + felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit)); + + return 0; } -static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu, - enum dsa_tag_protocol proto) +/* Alternatively to using the NPI functionality, that same hardware MAC + * connected internally to the enetc or fman DSA conduit can be configured to + * use the software-defined tag_8021q frame format. As far as the hardware is + * concerned, it thinks it is a "dumb switch" - the queues of the CPU port + * module are now disconnected from it, but can still be accessed through + * register-based MMIO. 
+ */ +static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { + .setup = felix_tag_npi_setup, + .teardown = felix_tag_npi_teardown, + .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, + .change_conduit = felix_tag_npi_change_conduit, +}; + +static int felix_tag_8021q_setup(struct dsa_switch *ds) { - switch (proto) { - case DSA_TAG_PROTO_SEVILLE: - case DSA_TAG_PROTO_OCELOT: - felix_teardown_tag_npi(ds, cpu); - break; - case DSA_TAG_PROTO_OCELOT_8021Q: - felix_teardown_tag_8021q(ds, cpu); - break; - default: - break; + struct ocelot *ocelot = ds->priv; + struct dsa_port *dp; + int err; + + err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); + if (err) + return err; + + dsa_switch_for_each_cpu_port(dp, ds) + ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index); + + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index, + dp->cpu_dp->index); + + dsa_switch_for_each_available_port(dp, ds) + /* This overwrites ocelot_init(): + * Do not forward BPDU frames to the CPU port module, + * for 2 reasons: + * - When these packets are injected from the tag_8021q + * CPU port, we want them to go out, not loop back + * into the system. + * - STP traffic ingressing on a user port should go to + * the tag_8021q CPU port, not to the hardware CPU + * port module. + */ + ocelot_write_gix(ocelot, + ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), + ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); + + /* The ownership of the CPU port module's queues might have just been + * transferred to the tag_8021q tagger from the NPI-based tagger. + * So there might still be all sorts of crap in the queues. On the + * other hand, the MMIO-based matching of PTP frames is very brittle, + * so we need to be careful that there are no extra frames to be + * dequeued over MMIO, since we would never know to discard them. + */ + ocelot_lock_xtr_grp_bh(ocelot, 0); + ocelot_drain_cpu_queue(ocelot, 0); + ocelot_unlock_xtr_grp_bh(ocelot, 0); + + /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info + * about whether the received packets were VLAN-tagged on the wire, + * since they are always tagged on egress towards the CPU port. + * + * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges, + * we must work around the fallout by untagging in software to make + * untagged reception work more or less as expected. + */ + ds->untag_vlan_aware_bridge_pvid = true; + + return 0; +} + +static void felix_tag_8021q_teardown(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *dp; + + dsa_switch_for_each_available_port(dp, ds) + /* Restore the logic from ocelot_init: + * do not forward BPDU frames to the front ports. 
+ */ + ocelot_write_gix(ocelot, + ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), + ANA_PORT_CPU_FWD_BPDU_CFG, + dp->index); + + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index); + + dsa_switch_for_each_cpu_port(dp, ds) + ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index); + + dsa_tag_8021q_unregister(ds); + + ds->untag_vlan_aware_bridge_pvid = false; +} + +static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) +{ + return dsa_cpu_ports(ds); +} + +static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) +{ + int cpu = felix_cpu_port_for_conduit(ds, conduit); + struct ocelot *ocelot = ds->priv; + + ocelot_port_unassign_dsa_8021q_cpu(ocelot, port); + ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu); + + return felix_update_trapping_destinations(ds, true); +} + +static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { + .setup = felix_tag_8021q_setup, + .teardown = felix_tag_8021q_teardown, + .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, + .change_conduit = felix_tag_8021q_change_conduit, +}; + +static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, + bool uc, bool mc, bool bc) +{ + struct ocelot *ocelot = ds->priv; + unsigned long val; + + val = uc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); + + val = mc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); + + val = bc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC); +} + +static void +felix_migrate_host_flood(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; + + if (old_proto_ops) { + mask = old_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, false, false, false); } + + mask = proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); +} + +static int felix_migrate_mdbs(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) +{ + struct ocelot *ocelot = ds->priv; + unsigned long from, to; + + if (!old_proto_ops) + return 0; + + from = old_proto_ops->get_host_fwd_mask(ds); + to = proto_ops->get_host_fwd_mask(ds); + + return ocelot_migrate_mdbs(ocelot, from, to); +} + +/* Configure the shared hardware resources for a transition between + * @old_proto_ops and @proto_ops. + * Manual migration is needed because as far as DSA is concerned, no change of + * the CPU port is taking place here, just of the tagging protocol. + */ +static int +felix_tag_proto_setup_shared(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) +{ + bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); + int err; + + err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); + if (err) + return err; + + felix_update_trapping_destinations(ds, using_tag_8021q); + + felix_migrate_host_flood(ds, proto_ops, old_proto_ops); + + return 0; } /* This always leaves the switch in a consistent state, because although the * tag_8021q setup can fail, the NPI setup can't. 
So either the change is made, * or the restoration is guaranteed to work. */ -static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, +static int felix_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { + const struct felix_tag_proto_ops *old_proto_ops, *proto_ops; struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - enum dsa_tag_protocol old_proto = felix->tag_proto; int err; - if (proto != DSA_TAG_PROTO_SEVILLE && - proto != DSA_TAG_PROTO_OCELOT && - proto != DSA_TAG_PROTO_OCELOT_8021Q) + switch (proto) { + case DSA_TAG_PROTO_SEVILLE: + case DSA_TAG_PROTO_OCELOT: + proto_ops = &felix_tag_npi_proto_ops; + break; + case DSA_TAG_PROTO_OCELOT_8021Q: + proto_ops = &felix_tag_8021q_proto_ops; + break; + default: return -EPROTONOSUPPORT; + } - felix_del_tag_protocol(ds, cpu, old_proto); + old_proto_ops = felix->tag_proto_ops; - err = felix_set_tag_protocol(ds, cpu, proto); - if (err) { - felix_set_tag_protocol(ds, cpu, old_proto); - return err; - } + if (proto_ops == old_proto_ops) + return 0; + + err = proto_ops->setup(ds); + if (err) + goto setup_failed; + + err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); + if (err) + goto setup_shared_failed; + + if (old_proto_ops) + old_proto_ops->teardown(ds); + felix->tag_proto_ops = proto_ops; felix->tag_proto = proto; return 0; + +setup_shared_failed: + proto_ops->teardown(ds); +setup_failed: + return err; } static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, @@ -636,6 +809,38 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, return felix->tag_proto; } +static void felix_port_set_host_flood(struct dsa_switch *ds, int port, + bool uc, bool mc) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; + + if (uc) + felix->host_flood_uc_mask |= BIT(port); + else + felix->host_flood_uc_mask &= ~BIT(port); + + if (mc) + felix->host_flood_mc_mask |= BIT(port); + else + felix->host_flood_mc_mask &= ~BIT(port); + + mask = felix->tag_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); +} + +static int felix_port_change_conduit(struct dsa_switch *ds, int port, + struct net_device *conduit, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + return felix->tag_proto_ops->change_conduit(ds, port, conduit, extack); +} + static int felix_set_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { @@ -646,6 +851,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds, return 0; } +static void felix_port_fast_age(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_mact_flush(ocelot, port); + if (err) + dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n", + port, ERR_PTR(err)); +} + static int felix_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { @@ -655,35 +871,111 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port, } static int felix_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; - return ocelot_fdb_add(ocelot, port, addr, vid); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if 
(dsa_port_is_cpu(dp) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + + return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); } static int felix_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; - return ocelot_fdb_del(ocelot, port, addr, vid); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_port_is_cpu(dp) && !bridge_dev && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + + return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev); +} + +static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *bridge_dev = felix_classify_db(db); + struct ocelot *ocelot = ds->priv; + + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev); } static int felix_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_port_mdb_add(ocelot, port, mdb); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); } static int felix_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db) { + struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - return ocelot_port_mdb_del(ocelot, port, mdb); + if (IS_ERR(bridge_dev)) + return PTR_ERR(bridge_dev); + + if (dsa_is_cpu_port(ds, port) && !bridge_dev && + dsa_mdb_present_in_other_db(ds, port, mdb, db)) + return 0; + + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + + return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); } static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, @@ -709,46 +1001,63 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + ocelot_port_bridge_flags(ocelot, port, val); return 0; } static int felix_bridge_join(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_join(ocelot, port, br); - - return 0; + return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num, + extack); } static void felix_bridge_leave(struct dsa_switch *ds, int port, - struct net_device *br) + struct dsa_bridge 
bridge) { struct ocelot *ocelot = ds->priv; - ocelot_port_bridge_leave(ocelot, port, br); + ocelot_port_bridge_leave(ocelot, port, bridge.dev); } static int felix_lag_join(struct dsa_switch *ds, int port, - struct net_device *bond, - struct netdev_lag_upper_info *info) + struct dsa_lag lag, + struct netdev_lag_upper_info *info, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; + int err; + + err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack); + if (err) + return err; + + /* Update the logical LAG port that serves as tag_8021q CPU port */ + if (!dsa_is_cpu_port(ds, port)) + return 0; - return ocelot_port_lag_join(ocelot, port, bond, info); + return felix_port_change_conduit(ds, port, lag.dev, extack); } static int felix_lag_leave(struct dsa_switch *ds, int port, - struct net_device *bond) + struct dsa_lag lag) { struct ocelot *ocelot = ds->priv; - ocelot_port_lag_leave(ocelot, port, bond); + ocelot_port_lag_leave(ocelot, port, lag.dev); - return 0; + /* Update the logical LAG port that serves as tag_8021q CPU port */ + if (!dsa_is_cpu_port(ds, port)) + return 0; + + return felix_port_change_conduit(ds, port, lag.dev, NULL); } static int felix_lag_change(struct dsa_switch *ds, int port) @@ -762,7 +1071,8 @@ static int felix_lag_change(struct dsa_switch *ds, int port) } static int felix_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; u16 flags = vlan->flags; @@ -780,15 +1090,31 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port, return ocelot_vlan_prepare(ocelot, port, vlan->vid, flags & BRIDGE_VLAN_INFO_PVID, - flags & BRIDGE_VLAN_INFO_UNTAGGED); + flags & BRIDGE_VLAN_INFO_UNTAGGED, + extack); } static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; + bool using_tag_8021q; + struct felix *felix; + int err; - return ocelot_port_vlan_filtering(ocelot, port, enabled); + err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack); + if (err) + return err; + + felix = ocelot_to_felix(ocelot); + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; + if (using_tag_8021q) { + err = felix_update_tag_8021q_rx_rules(ds, port, enabled); + if (err) + return err; + } + + return 0; } static int felix_vlan_add(struct dsa_switch *ds, int port, @@ -799,7 +1125,7 @@ static int felix_vlan_add(struct dsa_switch *ds, int port, u16 flags = vlan->flags; int err; - err = felix_vlan_prepare(ds, port, vlan); + err = felix_vlan_prepare(ds, port, vlan, extack); if (err) return err; @@ -816,152 +1142,130 @@ static int felix_vlan_del(struct dsa_switch *ds, int port, return ocelot_vlan_del(ocelot, port, vlan->vid); } -static int felix_port_enable(struct dsa_switch *ds, int port, - struct phy_device *phy) +static void felix_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) { struct ocelot *ocelot = ds->priv; - ocelot_port_enable(ocelot, port, phy); + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD | + MAC_2500FD; - return 0; + __set_bit(ocelot->ports[port]->phy_mode, + config->supported_interfaces); + if (ocelot->ports[port]->phy_mode == PHY_INTERFACE_MODE_USXGMII) + __set_bit(PHY_INTERFACE_MODE_10G_QXGMII, + config->supported_interfaces); } -static void felix_port_disable(struct dsa_switch *ds, int port) +static void 
felix_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) { - struct ocelot *ocelot = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + int port = dp->index; + struct felix *felix; - return ocelot_port_disable(ocelot, port); + felix = ocelot_to_felix(ocelot); + + if (felix->info->phylink_mac_config) + felix->info->phylink_mac_config(ocelot, port, mode, state); } -static void felix_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static struct phylink_pcs * +felix_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t iface) { - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + struct phylink_pcs *pcs = NULL; + int port = dp->index; + struct felix *felix; - if (felix->info->phylink_validate) - felix->info->phylink_validate(ocelot, port, supported, state); -} + felix = ocelot_to_felix(ocelot); -static void felix_phylink_mac_config(struct dsa_switch *ds, int port, - unsigned int link_an_mode, - const struct phylink_link_state *state) -{ - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - struct dsa_port *dp = dsa_to_port(ds, port); + if (felix->pcs && felix->pcs[port]) + pcs = felix->pcs[port]; - if (felix->pcs[port]) - phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs); + return pcs; } -static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void felix_phylink_mac_link_down(struct phylink_config *config, unsigned int link_an_mode, phy_interface_t interface) { - struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; - int err; - - ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, - DEV_MAC_ENA_CFG); + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + int port = dp->index; + struct felix *felix; - ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); + felix = ocelot_to_felix(ocelot); - err = ocelot_port_flush(ocelot, port); - if (err) - dev_err(ocelot->dev, "failed to flush port %d: %d\n", - port, err); - - /* Put the port in reset. 
*/ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_MAC_TX_RST | - DEV_CLOCK_CFG_MAC_RX_RST | - DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), - DEV_CLOCK_CFG); + ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, + felix->info->quirks); } -static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void felix_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int link_an_mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + int port = dp->index; + struct felix *felix; + + felix = ocelot_to_felix(ocelot); + + ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, + interface, speed, duplex, tx_pause, rx_pause, + felix->info->quirks); + + if (felix->info->port_sched_speed_set) + felix->info->port_sched_speed_set(ocelot, port, speed); +} + +static int felix_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; struct felix *felix = ocelot_to_felix(ocelot); - u32 mac_fc_cfg; - - /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and - * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is - * integrated is that the MAC speed is fixed and it's the PCS who is - * performing the rate adaptation, so we have to write "1000Mbps" into - * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default - * value). - */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), - DEV_CLOCK_CFG); - switch (speed) { - case SPEED_10: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3); - break; - case SPEED_100: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2); - break; - case SPEED_1000: - case SPEED_2500: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1); - break; - default: - dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", - port, speed); - return; - } - - /* handle Rx pause in all cases, with 2500base-X this is used for rate - * adaptation. - */ - mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; + if (!dsa_port_is_user(dp)) + return 0; - if (tx_pause) - mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | - SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | - SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | - SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; + if (ocelot->npi >= 0) { + struct net_device *conduit = dsa_port_to_conduit(dp); - /* Flow control. Link speed is only used here to evaluate the time - * specification in incoming pause frames. 
- */ - ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); + if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) { + dev_err(ds->dev, "Multiple conduits are not allowed\n"); + return -EINVAL; + } + } - ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); + if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return 0; - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); + return dsa_port_simple_hsr_join(ds, port, dp->hsr_dev, NULL); +} - /* Undo the effects of felix_phylink_mac_link_down: - * enable MAC module - */ - ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | - DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); +static void felix_port_disable(struct dsa_switch *ds, int port) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); - /* Enable receiving frames on the port, and activate auto-learning of - * MAC addresses. - */ - ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | - ANA_PORT_PORT_CFG_RECV_ENA | - ANA_PORT_PORT_CFG_PORTID_VAL(port), - ANA_PORT_PORT_CFG, port); + if (!dsa_port_is_user(dp)) + return; - /* Core: Enable port for frame transfer */ - ocelot_fields_write(ocelot, port, - QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); + if (!dp->hsr_dev || felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return; - if (felix->info->port_sched_speed_set) - felix->info->port_sched_speed_set(ocelot, port, speed); + dsa_port_simple_hsr_leave(ds, port, dp->hsr_dev); } static void felix_port_qos_map_init(struct ocelot *ocelot, int port) @@ -985,6 +1289,63 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port) } } +static void felix_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_stats64(ocelot, port, stats); +} + +static void felix_get_pause_stats(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_pause_stats(ocelot, port, pause_stats); +} + +static void felix_get_rmon_stats(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges); +} + +static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats); +} + +static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats); +} + +static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats); +} + +static void felix_get_ts_stats(struct dsa_switch *ds, int port, + struct ethtool_ts_stats *ts_stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_ts_stats(ocelot, port, ts_stats); +} + static void felix_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data) { @@ -1008,22 +1369,40 @@ static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset) } static int felix_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ocelot 
*ocelot = ds->priv; return ocelot_get_ts_info(ocelot, port, info); } +static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { + [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL, + [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, + [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, + [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, + [PHY_INTERFACE_MODE_10G_QXGMII] = OCELOT_PORT_MODE_10G_QXGMII, + [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, + [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, +}; + +static int felix_validate_phy_mode(struct felix *felix, int port, + phy_interface_t phy_mode) +{ + u32 modes = felix->info->port_modes[port]; + + if (felix_phy_match_table[phy_mode] & modes) + return 0; + return -EOPNOTSUPP; +} + static int felix_parse_ports_node(struct felix *felix, struct device_node *ports_node, phy_interface_t *port_phy_modes) { - struct ocelot *ocelot = &felix->ocelot; struct device *dev = felix->ocelot.dev; - struct device_node *child; - for_each_available_child_of_node(ports_node, child) { + for_each_available_child_of_node_scoped(ports_node, child) { phy_interface_t phy_mode; u32 port; int err; @@ -1032,7 +1411,6 @@ static int felix_parse_ports_node(struct felix *felix, if (of_property_read_u32(child, "reg", &port) < 0) { dev_err(dev, "Port number not defined in device tree " "(property \"reg\")\n"); - of_node_put(child); return -ENODEV; } @@ -1042,16 +1420,19 @@ static int felix_parse_ports_node(struct felix *felix, dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n", port); - of_node_put(child); return -ENODEV; } - err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode); + err = felix_validate_phy_mode(felix, port, phy_mode); if (err < 0) { - dev_err(dev, "Unsupported PHY mode %s on port %d\n", - phy_modes(phy_mode), port); - of_node_put(child); - return err; + dev_info(dev, "Unsupported PHY mode %s on port %d\n", + phy_modes(phy_mode), port); + + /* Leave port_phy_modes[port] = 0, which is also + * PHY_INTERFACE_MODE_NA. This will perform a + * best-effort to bring up as many ports as possible. + */ + continue; } port_phy_modes[port] = phy_mode; @@ -1070,8 +1451,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) switch_node = dev->of_node; ports_node = of_get_child_by_name(switch_node, "ports"); + if (!ports_node) + ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); if (!ports_node) { - dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); + dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n"); return -ENODEV; } @@ -1081,11 +1464,62 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) return err; } +static struct regmap *felix_request_regmap_by_name(struct felix *felix, + const char *resource_name) +{ + struct ocelot *ocelot = &felix->ocelot; + struct resource res; + int i; + + /* In an MFD configuration, regmaps are registered directly to the + * parent device before the child devices are probed, so there is no + * need to initialize a new one. 
+ */ + if (!felix->info->resources) + return dev_get_regmap(ocelot->dev->parent, resource_name); + + for (i = 0; i < felix->info->num_resources; i++) { + if (strcmp(resource_name, felix->info->resources[i].name)) + continue; + + memcpy(&res, &felix->info->resources[i], sizeof(res)); + res.start += felix->switch_base; + res.end += felix->switch_base; + + return ocelot_regmap_init(ocelot, &res); + } + + return ERR_PTR(-ENOENT); +} + +static struct regmap *felix_request_regmap(struct felix *felix, + enum ocelot_target target) +{ + const char *resource_name = felix->info->resource_names[target]; + + /* If the driver didn't provide a resource name for the target, + * the resource is optional. + */ + if (!resource_name) + return NULL; + + return felix_request_regmap_by_name(felix, resource_name); +} + +static struct regmap *felix_request_port_regmap(struct felix *felix, int port) +{ + char resource_name[32]; + + sprintf(resource_name, "port%d", port); + + return felix_request_regmap_by_name(felix, resource_name); +} + static int felix_init_structs(struct felix *felix, int num_phys_ports) { struct ocelot *ocelot = &felix->ocelot; phy_interface_t *port_phy_modes; - struct resource res; + struct regmap *target; int port, i, err; ocelot->num_phys_ports = num_phys_ports; @@ -1095,10 +1529,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return -ENOMEM; ocelot->map = felix->info->map; - ocelot->stats_layout = felix->info->stats_layout; - ocelot->num_stats = felix->info->num_stats; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; + ocelot->vcap_pol.base = felix->info->vcap_pol_base; + ocelot->vcap_pol.max = felix->info->vcap_pol_max; + ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2; + ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2; ocelot->ops = felix->info->ops; ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT; ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT; @@ -1116,20 +1552,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) } for (i = 0; i < TARGET_MAX; i++) { - struct regmap *target; - - if (!felix->info->target_io_res[i].name) - continue; - - memcpy(&res, &felix->info->target_io_res[i], sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start += felix->switch_base; - res.end += felix->switch_base; - - target = ocelot_regmap_init(ocelot, &res); + target = felix_request_regmap(felix, i); if (IS_ERR(target)) { dev_err(ocelot->dev, - "Failed to map device memory space\n"); + "Failed to map device memory space: %pe\n", + target); kfree(port_phy_modes); return PTR_ERR(target); } @@ -1146,7 +1573,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; - struct regmap *target; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -1158,16 +1584,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return -ENOMEM; } - memcpy(&res, &felix->info->port_io_res[port], sizeof(res)); - res.flags = IORESOURCE_MEM; - res.start += felix->switch_base; - res.end += felix->switch_base; - - target = ocelot_regmap_init(ocelot, &res); + target = felix_request_port_regmap(felix, port); if (IS_ERR(target)) { dev_err(ocelot->dev, - "Failed to map memory space for port %d\n", - port); + "Failed to map memory space for port %d: %pe\n", + port, target); kfree(port_phy_modes); return PTR_ERR(target); } @@ -1175,6 +1596,7 @@ static int felix_init_structs(struct felix *felix, int 
num_phys_ports) ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; ocelot_port->target = target; + ocelot_port->index = port; ocelot->ports[port] = ocelot_port; } @@ -1189,21 +1611,104 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return 0; } -/* Hardware initialization done here so that we can allocate structures with - * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing - * us to allocate structures twice (leak memory) and map PCI memory twice - * (which will not work). - */ +static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port, + struct sk_buff *skb) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; + struct sk_buff *skb_match = NULL, *skb_tmp; + unsigned long flags; + + if (!clone) + return; + + spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags); + + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { + if (skb != clone) + continue; + __skb_unlink(skb, &ocelot_port->tx_skbs); + skb_match = skb; + break; + } + + spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags); + + WARN_ONCE(!skb_match, + "Could not find skb clone in TX timestamping list\n"); +} + +#define work_to_xmit_work(w) \ + container_of((w), struct felix_deferred_xmit_work, work) + +static void felix_port_deferred_xmit(struct kthread_work *work) +{ + struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work); + struct dsa_switch *ds = xmit_work->dp->ds; + struct sk_buff *skb = xmit_work->skb; + u32 rew_op = ocelot_ptp_rew_op(skb); + struct ocelot *ocelot = ds->priv; + int port = xmit_work->dp->index; + int retries = 10; + + ocelot_lock_inj_grp(ocelot, 0); + + do { + if (ocelot_can_inject(ocelot, 0)) + break; + + cpu_relax(); + } while (--retries); + + if (!retries) { + ocelot_unlock_inj_grp(ocelot, 0); + dev_err(ocelot->dev, "port %d failed to inject skb\n", + port); + ocelot_port_purge_txtstamp_skb(ocelot, port, skb); + kfree_skb(skb); + return; + } + + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); + + ocelot_unlock_inj_grp(ocelot, 0); + + consume_skb(skb); + kfree(xmit_work); +} + +static int felix_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + struct ocelot_8021q_tagger_data *tagger_data; + + switch (proto) { + case DSA_TAG_PROTO_OCELOT_8021Q: + tagger_data = ocelot_8021q_tagger_data(ds); + tagger_data->xmit_work_fn = felix_port_deferred_xmit; + return 0; + case DSA_TAG_PROTO_OCELOT: + case DSA_TAG_PROTO_SEVILLE: + return 0; + default: + return -EPROTONOSUPPORT; + } +} + static int felix_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int port, err; + struct dsa_port *dp; + int err; err = felix_init_structs(felix, ds->num_ports); if (err) return err; + if (ocelot->targets[HSIO]) + ocelot_pll5_init(ocelot); + err = ocelot_init(ocelot); if (err) goto out_mdiobus_free; @@ -1217,44 +1722,47 @@ static int felix_setup(struct dsa_switch *ds) } } - for (port = 0; port < ds->num_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; + dsa_switch_for_each_available_port(dp, ds) { + ocelot_init_port(ocelot, dp->index); - ocelot_init_port(ocelot, port); + if (felix->info->configure_serdes) + felix->info->configure_serdes(ocelot, dp->index, + dp->dn); /* Set the default QoS Classification based on PCP and DEI * bits of vlan tag. 
*/ - felix_port_qos_map_init(ocelot, port); + felix_port_qos_map_init(ocelot, dp->index); + } + + if (felix->info->request_irq) { + err = felix->info->request_irq(ocelot); + if (err) { + dev_err(ocelot->dev, "Failed to request IRQ: %pe\n", + ERR_PTR(err)); + goto out_deinit_ports; + } } err = ocelot_devlink_sb_register(ocelot); if (err) goto out_deinit_ports; - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; - - /* The initial tag protocol is NPI which always returns 0, so - * there's no real point in checking for errors. - */ - felix_set_tag_protocol(ds, port, felix->tag_proto); - } + /* The initial tag protocol is NPI which won't fail during initial + * setup, there's no real point in checking for errors. + */ + felix_change_tag_protocol(ds, felix->tag_proto); ds->mtu_enforcement_ingress = true; ds->assisted_learning_on_cpu_port = true; + ds->fdb_isolation = true; + ds->max_num_bridges = ds->num_ports; return 0; out_deinit_ports: - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - ocelot_deinit_port(ocelot, port); - } + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); @@ -1270,50 +1778,56 @@ static void felix_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - int port; + struct dsa_port *dp; - for (port = 0; port < ds->num_ports; port++) { - if (!dsa_is_cpu_port(ds, port)) - continue; + rtnl_lock(); + if (felix->tag_proto_ops) + felix->tag_proto_ops->teardown(ds); + rtnl_unlock(); - felix_del_tag_protocol(ds, port, felix->tag_proto); - } + dsa_switch_for_each_available_port(dp, ds) + ocelot_deinit_port(ocelot, dp->index); ocelot_devlink_sb_unregister(ocelot); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); - for (port = 0; port < ocelot->num_phys_ports; port++) { - if (dsa_is_unused_port(ds, port)) - continue; - - ocelot_deinit_port(ocelot, port); - } - if (felix->info->mdio_bus_free) felix->info->mdio_bus_free(ocelot); } static int felix_hwtstamp_get(struct dsa_switch *ds, int port, - struct ifreq *ifr) + struct kernel_hwtstamp_config *config) { struct ocelot *ocelot = ds->priv; - return ocelot_hwstamp_get(ocelot, port, ifr); + ocelot_hwstamp_get(ocelot, port, config); + + return 0; } static int felix_hwtstamp_set(struct dsa_switch *ds, int port, - struct ifreq *ifr) + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; - return ocelot_hwstamp_set(ocelot, port, ifr); + err = ocelot_hwstamp_set(ocelot, port, config, extack); + if (err) + return err; + + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; + + return felix_update_trapping_destinations(ds, using_tag_8021q); } -static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) +static bool felix_check_xtr_pkt(struct ocelot *ocelot) { struct felix *felix = ocelot_to_felix(ocelot); - int err, grp = 0; + int err = 0, grp = 0; if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q) return false; @@ -1321,8 +1835,7 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) if (!felix->info->quirk_no_xtr_irq) return false; - if (ptp_type == PTP_CLASS_NONE) - return false; + ocelot_lock_xtr_grp(ocelot, grp); while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { struct sk_buff 
*skb; @@ -1353,8 +1866,14 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type) } out: - if (err < 0) + if (err < 0) { + dev_err_ratelimited(ocelot->dev, + "Error during packet extraction: %pe\n", + ERR_PTR(err)); ocelot_drain_cpu_queue(ocelot, 0); + } + + ocelot_unlock_xtr_grp(ocelot, grp); return true; } @@ -1362,19 +1881,31 @@ out: static bool felix_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb, unsigned int type) { - u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN; + u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo; struct skb_shared_hwtstamps *shhwtstamps; struct ocelot *ocelot = ds->priv; - u32 tstamp_lo, tstamp_hi; struct timespec64 ts; - u64 tstamp, val; + u32 tstamp_hi; + u64 tstamp; + + switch (type & PTP_CLASS_PMASK) { + case PTP_CLASS_L2: + if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2)) + return false; + break; + case PTP_CLASS_IPV4: + case PTP_CLASS_IPV6: + if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4)) + return false; + break; + } /* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb * for RX timestamping. Then free it, and poll for its copy through * MMIO in the CPU port module, and inject that into the stack from * ocelot_xtr_poll(). */ - if (felix_check_xtr_pkt(ocelot, type)) { + if (felix_check_xtr_pkt(ocelot)) { kfree_skb(skb); return true; } @@ -1382,9 +1913,6 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port, ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); tstamp = ktime_set(ts.tv_sec, ts.tv_nsec); - ocelot_xfh_get_rew_val(extraction, &val); - tstamp_lo = (u32)val; - tstamp_hi = tstamp >> 32; if ((tstamp & 0xffffffff) < tstamp_lo) tstamp_hi--; @@ -1406,8 +1934,12 @@ static void felix_txtstamp(struct dsa_switch *ds, int port, if (!ocelot->ptp) return; - if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) + if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) { + dev_err_ratelimited(ds->dev, + "port %d delivering skb without TX timestamp\n", + port); return; + } if (clone) OCELOT_SKB_CB(skb)->clone = clone; @@ -1416,9 +1948,17 @@ static void felix_txtstamp(struct dsa_switch *ds, int port, static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct ocelot *ocelot = ds->priv; + struct ocelot_port *ocelot_port = ocelot->ports[port]; ocelot_port_set_maxlen(ocelot, port, new_mtu); + mutex_lock(&ocelot->fwd_domain_lock); + + if (ocelot_port->taprio && ocelot->ops->tas_guard_bands_update) + ocelot->ops->tas_guard_bands_update(ocelot, port); + + mutex_unlock(&ocelot->fwd_domain_lock); + return 0; } @@ -1433,8 +1973,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + bool using_tag_8021q; + int err; + + err = ocelot_cls_flower_replace(ocelot, port, cls, ingress); + if (err) + return err; - return ocelot_cls_flower_replace(ocelot, port, cls, ingress); + using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; + + return felix_update_trapping_destinations(ds, using_tag_8021q); } static int felix_cls_flower_del(struct dsa_switch *ds, int port, @@ -1472,6 +2021,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port) ocelot_port_policer_del(ocelot, port); } +static int felix_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress, struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return 
ocelot_port_mirror_add(ocelot, port, mirror->to_local_port, + ingress, extack); +} + +static void felix_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_mirror_del(ocelot, port, mirror->ingress); +} + static int felix_port_setup_tc(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data) @@ -1621,25 +2188,152 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port, return ocelot_mrp_del_ring_role(ocelot, port, mrp); } -const struct dsa_switch_ops felix_switch_ops = { +static int felix_port_get_default_prio(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_default_prio(ocelot, port); +} + +static int felix_port_set_default_prio(struct dsa_switch *ds, int port, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_set_default_prio(ocelot, port, prio); +} + +static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_dscp_prio(ocelot, port, dscp); +} + +static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio); +} + +static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, + u8 prio) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio); +} + +static int felix_get_mm(struct dsa_switch *ds, int port, + struct ethtool_mm_state *state) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_get_mm(ocelot, port, state); +} + +static int felix_set_mm(struct dsa_switch *ds, int port, + struct ethtool_mm_cfg *cfg, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_set_mm(ocelot, port, cfg, extack); +} + +static void felix_get_mm_stats(struct dsa_switch *ds, int port, + struct ethtool_mm_stats *stats) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_get_mm_stats(ocelot, port, stats); +} + +/* Depending on port type, we may be able to support the offload later (with + * the "ocelot"/"seville" tagging protocols), or never. + * If we return 0, the dp->hsr_dev reference is kept for later; if we return + * -EOPNOTSUPP, it is cleared (which helps to not bother + * dsa_port_simple_hsr_leave() with an offload that didn't pass validation). 
+ */ +static int felix_port_hsr_join(struct dsa_switch *ds, int port, + struct net_device *hsr, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) { + int err; + + err = dsa_port_simple_hsr_validate(ds, port, hsr, extack); + if (err) + return err; + + NL_SET_ERR_MSG_MOD(extack, + "Offloading not supported with \"ocelot-8021q\""); + return 0; + } + + if (!(dsa_to_port(ds, port)->user->flags & IFF_UP)) + return 0; + + return dsa_port_simple_hsr_join(ds, port, hsr, extack); +} + +static int felix_port_hsr_leave(struct dsa_switch *ds, int port, + struct net_device *hsr) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + return 0; + + if (!(dsa_to_port(ds, port)->user->flags & IFF_UP)) + return 0; + + return dsa_port_simple_hsr_leave(ds, port, hsr); +} + +static const struct phylink_mac_ops felix_phylink_mac_ops = { + .mac_select_pcs = felix_phylink_mac_select_pcs, + .mac_config = felix_phylink_mac_config, + .mac_link_down = felix_phylink_mac_link_down, + .mac_link_up = felix_phylink_mac_link_up, +}; + +static const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .change_tag_protocol = felix_change_tag_protocol, + .connect_tag_protocol = felix_connect_tag_protocol, .setup = felix_setup, .teardown = felix_teardown, .set_ageing_time = felix_set_ageing_time, + .get_mm = felix_get_mm, + .set_mm = felix_set_mm, + .get_mm_stats = felix_get_mm_stats, + .get_stats64 = felix_get_stats64, + .get_pause_stats = felix_get_pause_stats, + .get_rmon_stats = felix_get_rmon_stats, + .get_ts_stats = felix_get_ts_stats, + .get_eth_ctrl_stats = felix_get_eth_ctrl_stats, + .get_eth_mac_stats = felix_get_eth_mac_stats, + .get_eth_phy_stats = felix_get_eth_phy_stats, .get_strings = felix_get_strings, .get_ethtool_stats = felix_get_ethtool_stats, .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, - .phylink_validate = felix_phylink_validate, - .phylink_mac_config = felix_phylink_mac_config, - .phylink_mac_link_down = felix_phylink_mac_link_down, - .phylink_mac_link_up = felix_phylink_mac_link_up, + .phylink_get_caps = felix_phylink_get_caps, .port_enable = felix_port_enable, .port_disable = felix_port_disable, + .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, + .lag_fdb_add = felix_lag_fdb_add, + .lag_fdb_del = felix_lag_fdb_del, .port_mdb_add = felix_mdb_add, .port_mdb_del = felix_mdb_del, .port_pre_bridge_flags = felix_pre_bridge_flags, @@ -1661,6 +2355,8 @@ const struct dsa_switch_ops felix_switch_ops = { .port_max_mtu = felix_get_max_mtu, .port_policer_add = felix_port_policer_add, .port_policer_del = felix_port_policer_del, + .port_mirror_add = felix_port_mirror_add, + .port_mirror_del = felix_port_mirror_del, .cls_flower_add = felix_cls_flower_add, .cls_flower_del = felix_cls_flower_del, .cls_flower_stats = felix_cls_flower_stats, @@ -1679,8 +2375,66 @@ const struct dsa_switch_ops felix_switch_ops = { .port_mrp_del = felix_mrp_del, .port_mrp_add_ring_role = felix_mrp_add_ring_role, .port_mrp_del_ring_role = felix_mrp_del_ring_role, + .tag_8021q_vlan_add = felix_tag_8021q_vlan_add, + .tag_8021q_vlan_del = felix_tag_8021q_vlan_del, + .port_get_default_prio = felix_port_get_default_prio, + .port_set_default_prio = felix_port_set_default_prio, + 
.port_get_dscp_prio = felix_port_get_dscp_prio, + .port_add_dscp_prio = felix_port_add_dscp_prio, + .port_del_dscp_prio = felix_port_del_dscp_prio, + .port_set_host_flood = felix_port_set_host_flood, + .port_change_conduit = felix_port_change_conduit, + .port_hsr_join = felix_port_hsr_join, + .port_hsr_leave = felix_port_hsr_leave, }; +int felix_register_switch(struct device *dev, resource_size_t switch_base, + int num_flooding_pgids, bool ptp, + bool mm_supported, + enum dsa_tag_protocol init_tag_proto, + const struct felix_info *info) +{ + struct dsa_switch *ds; + struct ocelot *ocelot; + struct felix *felix; + int err; + + felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL); + if (!felix) + return -ENOMEM; + + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + if (!ds) + return -ENOMEM; + + dev_set_drvdata(dev, felix); + + ocelot = &felix->ocelot; + ocelot->dev = dev; + ocelot->num_flooding_pgids = num_flooding_pgids; + ocelot->ptp = ptp; + ocelot->mm_supported = mm_supported; + + felix->info = info; + felix->switch_base = switch_base; + felix->ds = ds; + felix->tag_proto = init_tag_proto; + + ds->dev = dev; + ds->num_ports = info->num_ports; + ds->num_tx_queues = OCELOT_NUM_TC; + ds->ops = &felix_switch_ops; + ds->phylink_mac_ops = &felix_phylink_mac_ops; + ds->priv = ocelot; + + err = dsa_register_switch(ds); + if (err) + dev_err_probe(dev, err, "Failed to register DSA switch\n"); + + return err; +} +EXPORT_SYMBOL_GPL(felix_register_switch); + struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) { struct felix *felix = ocelot_to_felix(ocelot); @@ -1689,8 +2443,9 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) if (!dsa_is_user_port(ds, port)) return NULL; - return dsa_to_port(ds, port)->slave; + return dsa_to_port(ds, port)->user; } +EXPORT_SYMBOL_GPL(felix_port_to_netdev); int felix_netdev_to_port(struct net_device *dev) { @@ -1702,3 +2457,7 @@ int felix_netdev_to_port(struct net_device *dev) return dp->index; } +EXPORT_SYMBOL_GPL(felix_netdev_to_port); + +MODULE_DESCRIPTION("Felix DSA library"); +MODULE_LICENSE("GPL"); |
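
The felix_validate_phy_mode() hunk replaces the per-SoC prevalidate_phy_mode() callbacks with a lookup table (felix_phy_match_table) indexed by phy_interface_t and ANDed against a per-port capability word from felix_info::port_modes. Below is a minimal user-space sketch of that check; the mode names and capability bits are invented for illustration and are not the driver's OCELOT_PORT_MODE_* values.

#include <stdint.h>
#include <stdio.h>

enum phy_mode { MODE_INTERNAL, MODE_SGMII, MODE_QSGMII, MODE_USXGMII, MODE_MAX };

static const uint32_t mode_bit[MODE_MAX] = {
	[MODE_INTERNAL] = 1u << 0,
	[MODE_SGMII]    = 1u << 1,
	[MODE_QSGMII]   = 1u << 2,
	[MODE_USXGMII]  = 1u << 3,
};

/* A mode is supported on a port iff its bit is set in that port's mask. */
static int validate_phy_mode(uint32_t port_caps, enum phy_mode mode)
{
	return (mode_bit[mode] & port_caps) ? 0 : -1;
}

int main(void)
{
	uint32_t port0_caps = mode_bit[MODE_INTERNAL] | mode_bit[MODE_QSGMII];

	printf("QSGMII: %s\n",
	       validate_phy_mode(port0_caps, MODE_QSGMII) ? "unsupported" : "ok");
	printf("SGMII:  %s\n",
	       validate_phy_mode(port0_caps, MODE_SGMII) ? "unsupported" : "ok");
	return 0;
}

The felix_parse_ports_node() change pairs with this: an unsupported mode no longer aborts the probe, it just leaves port_phy_modes[port] at PHY_INTERFACE_MODE_NA so the remaining ports can still be brought up.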
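
felix_request_regmap_by_name() either reuses a regmap that an MFD parent already registered, or searches felix_info::resources by name and rebases the match by switch_base before mapping it. The lookup-and-rebase step in isolation, as a self-contained sketch with an invented resource table (names and addresses are illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct res { const char *name; uint64_t start, end; };

static const struct res resources[] = {
	{ "sys",   0x010000, 0x01ffff },
	{ "qsys",  0x020000, 0x02ffff },
	{ "port0", 0x100000, 0x10ffff },
};

/* Copy the named resource into *out, shifted by switch_base; -1 if unknown. */
static int request_by_name(const char *name, uint64_t switch_base,
			   struct res *out)
{
	size_t i;

	for (i = 0; i < sizeof(resources) / sizeof(resources[0]); i++) {
		if (strcmp(name, resources[i].name))
			continue;
		*out = resources[i];
		out->start += switch_base;
		out->end += switch_base;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct res r;
	char name[16];

	snprintf(name, sizeof(name), "port%d", 0);	/* same "port%d" naming convention */
	if (!request_by_name(name, 0x1fc000000ULL, &r))
		printf("%s: 0x%llx-0x%llx\n", name,
		       (unsigned long long)r.start, (unsigned long long)r.end);
	return 0;
}

Targets with no entry in resource_names[] are treated as optional, which is what the NULL return in felix_request_regmap() encodes.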
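
felix_port_deferred_xmit() busy-waits for injection group 0 to become free, but only for a fixed budget of attempts before giving up and dropping the skb. The retry shape, modelled in user space with a stubbed readiness check standing in for ocelot_can_inject():

#include <stdbool.h>
#include <stdio.h>

static int fifo_free_after = 3;	/* stub: the "hardware" becomes ready on the 3rd poll */

static bool can_inject(void)
{
	return --fifo_free_after == 0;
}

static int inject_with_retries(int budget)
{
	int retries = budget;

	do {
		if (can_inject())
			break;
		/* cpu_relax() would sit here in the kernel version */
	} while (--retries);

	return retries ? 0 : -1;	/* budget exhausted: caller drops the frame */
}

int main(void)
{
	printf("inject: %s\n", inject_with_retries(10) ? "dropped" : "ok");
	return 0;
}

On failure the driver also unlinks the timestamp clone via ocelot_port_purge_txtstamp_skb(), so a dropped frame cannot leave a stale entry on the port's tx_skbs queue.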
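
felix_setup() follows the usual staged-initialisation pattern: a stage that fails jumps to a label that tears down only what was already brought up (ports, then timestamping, then the core ocelot state). A compact model of that control flow, with invented stage names rather than the driver's real init/deinit helpers:

#include <stdio.h>

static int  init_core(void)    { puts("core up");    return 0; }
static void deinit_core(void)  { puts("core down"); }
static int  init_ports(void)   { puts("ports up");   return 0; }
static void deinit_ports(void) { puts("ports down"); }
static int  init_irq(void)     { puts("irq up");     return -1; /* simulated failure */ }

static int setup(void)
{
	int err;

	err = init_core();
	if (err)
		return err;

	err = init_ports();
	if (err)
		goto out_deinit_core;

	err = init_irq();
	if (err)
		goto out_deinit_ports;

	return 0;

out_deinit_ports:
	deinit_ports();
out_deinit_core:
	deinit_core();
	return err;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}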
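
felix_rxtstamp() now reads the 32-bit timestamp low word from OCELOT_SKB_CB(skb)->tstamp_lo instead of re-parsing the extraction frame header, and widens it against the current PTP time: the upper 32 bits come from "now", stepped back by one when the low word has already wrapped in between. The arithmetic on its own, with made-up sample values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_tstamp(uint64_t now, uint32_t tstamp_lo)
{
	uint32_t tstamp_hi = now >> 32;

	if ((uint32_t)(now & 0xffffffff) < tstamp_lo)
		tstamp_hi--;	/* low word wrapped between capture and readout */

	return ((uint64_t)tstamp_hi << 32) | tstamp_lo;
}

int main(void)
{
	/* "now" just past a 32-bit wrap, packet captured just before it */
	uint64_t now = (5ULL << 32) | 0x00000010;
	uint32_t lo  = 0xfffffff0;

	printf("0x%" PRIx64 "\n", expand_tstamp(now, lo));	/* prints 0x4fffffff0 */
	return 0;
}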
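
The comment above felix_port_hsr_join() spells out the return-value contract: returning 0 keeps the dp->hsr_dev reference around for a possible later offload, while -EOPNOTSUPP clears it. Under the ocelot-8021q tagger the join is validated but stays in software; otherwise the offload is only programmed once the port is up. A rough user-space model of that decision, with stub functions in place of the DSA helpers named in the diff:

#include <stdio.h>

enum tag_proto { TAG_OCELOT, TAG_OCELOT_8021Q };

static int validate(void)     { return 0; }	/* stub: topology checks pass */
static int offload_join(void) { puts("offloaded"); return 0; }

/* 0 keeps the hsr reference (possibly as a software fallback),
 * a negative error clears it. */
static int hsr_join(enum tag_proto proto, int port_is_up)
{
	if (proto == TAG_OCELOT_8021Q) {
		int err = validate();

		if (err)
			return err;
		puts("not offloaded under ocelot-8021q, kept in software");
		return 0;
	}

	if (!port_is_up)
		return 0;	/* port is down: accept, nothing to program yet */

	return offload_join();
}

int main(void)
{
	hsr_join(TAG_OCELOT_8021Q, 1);
	hsr_join(TAG_OCELOT, 1);
	return 0;
}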
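
felix_register_switch() becomes the common entry point for all SoC front-ends: it allocates the felix and dsa_switch structures, points the switch at the now static const felix_switch_ops and felix_phylink_mac_ops tables, and calls dsa_register_switch(). The general shape, a const table of callbacks dispatched by a small framework against opaque per-instance state, sketched in plain C with hypothetical names (this is not the DSA API itself):

#include <stdio.h>

struct switch_ops {
	int  (*setup)(void *priv);
	void (*teardown)(void *priv);
};

struct my_switch { int num_ports; };

static int my_setup(void *priv)
{
	struct my_switch *sw = priv;

	printf("setting up %d ports\n", sw->num_ports);
	return 0;
}

static void my_teardown(void *priv)
{
	(void)priv;
	puts("teardown");
}

/* Shared, immutable ops table; per-instance state travels through priv. */
static const struct switch_ops my_ops = {
	.setup    = my_setup,
	.teardown = my_teardown,
};

/* Stand-in for the framework: it only ever sees ops + priv. */
static int register_switch(const struct switch_ops *ops, void *priv)
{
	return ops->setup(priv);
}

int main(void)
{
	struct my_switch sw = { .num_ports = 6 };
	int err = register_switch(&my_ops, &sw);

	if (!err)
		my_ops.teardown(&sw);
	return err;
}

Keeping the ops tables static and funnelling registration through one exported helper means the individual VSC9959/VSC9953 drivers no longer need to reach into the library's internals, only into felix_info.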
