Diffstat (limited to 'net/dsa')
-rw-r--r--  net/dsa/Kconfig             |  36
-rw-r--r--  net/dsa/Makefile            |   3
-rw-r--r--  net/dsa/conduit.c           | 162
-rw-r--r--  net/dsa/devlink.c           |  26
-rw-r--r--  net/dsa/dsa.c               | 142
-rw-r--r--  net/dsa/port.c              |  63
-rw-r--r--  net/dsa/tag.c               |   5
-rw-r--r--  net/dsa/tag.h               | 159
-rw-r--r--  net/dsa/tag_8021q.c         |  86
-rw-r--r--  net/dsa/tag_8021q.h         |   7
-rw-r--r--  net/dsa/tag_brcm.c          | 137
-rw-r--r--  net/dsa/tag_gswip.c         |   6
-rw-r--r--  net/dsa/tag_hellcreek.c     |   3
-rw-r--r--  net/dsa/tag_ksz.c           |  50
-rw-r--r--  net/dsa/tag_mtk.c           |   3
-rw-r--r--  net/dsa/tag_mxl-gsw1xx.c    | 117
-rw-r--r--  net/dsa/tag_ocelot.c        |  43
-rw-r--r--  net/dsa/tag_ocelot_8021q.c  |   6
-rw-r--r--  net/dsa/tag_qca.c           |   3
-rw-r--r--  net/dsa/tag_rtl4_a.c        |   2
-rw-r--r--  net/dsa/tag_rtl8_4.c        |   3
-rw-r--r--  net/dsa/tag_rzn1_a5psw.c    |   3
-rw-r--r--  net/dsa/tag_sja1105.c       |  74
-rw-r--r--  net/dsa/tag_trailer.c       |   3
-rw-r--r--  net/dsa/tag_vsc73xx_8021q.c |  68
-rw-r--r--  net/dsa/tag_xrs700x.c       |   8
-rw-r--r--  net/dsa/tag_yt921x.c        | 139
-rw-r--r--  net/dsa/user.c              | 206
28 files changed, 1156 insertions, 407 deletions
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 8d5bf869eb14..f86b30742122 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -42,12 +42,24 @@ config NET_DSA_TAG_BRCM
Broadcom switches which place the tag after the MAC source address.
config NET_DSA_TAG_BRCM_LEGACY
- tristate "Tag driver for Broadcom legacy switches using in-frame headers"
+ tristate "Tag driver for BCM63xx legacy switches using in-frame headers"
select NET_DSA_TAG_BRCM_COMMON
help
Say Y if you want to enable support for tagging frames for the
- Broadcom legacy switches which place the tag after the MAC source
+ BCM63xx legacy switches which place the tag after the MAC source
address.
+ This tag is used in BCM63xx legacy switches which work without the
+ original FCS and length before the tag insertion.
+
+config NET_DSA_TAG_BRCM_LEGACY_FCS
+ tristate "Tag driver for BCM53xx legacy switches using in-frame headers"
+ select NET_DSA_TAG_BRCM_COMMON
+ help
+ Say Y if you want to enable support for tagging frames for the
+ BCM53xx legacy switches which place the tag after the MAC source
+ address.
+ This tag is used in BCM53xx legacy switches which expect original
+ FCS and length before the tag insertion to be present.
config NET_DSA_TAG_BRCM_PREPEND
tristate "Tag driver for Broadcom switches using prepended headers"
@@ -92,6 +104,14 @@ config NET_DSA_TAG_MTK
Say Y or M if you want to enable support for tagging frames for
Mediatek switches.
+config NET_DSA_TAG_MXL_GSW1XX
+ tristate "Tag driver for MaxLinear GSW1xx switches"
+ help
+ The GSW1xx family of switches supports an 8-byte special tag which
+ can be used on the CPU port of the switch.
+ Say Y or M if you want to enable support for tagging frames for
+ MaxLinear GSW1xx switches.
+
config NET_DSA_TAG_KSZ
tristate "Tag driver for Microchip 8795/937x/9477/9893 families of switches"
help
@@ -166,10 +186,22 @@ config NET_DSA_TAG_TRAILER
Say Y or M if you want to enable support for tagging frames
with a trailer, e.g. Marvell 88E6060.
+config NET_DSA_TAG_VSC73XX_8021Q
+ tristate "Tag driver for Microchip/Vitesse VSC73xx family of switches, using VLAN"
+ help
+ Say Y or M if you want to enable support for tagging frames with a
+ custom VLAN-based header.
+
config NET_DSA_TAG_XRS700X
tristate "Tag driver for XRS700x switches"
help
Say Y or M if you want to enable support for tagging frames for
Arrow SpeedChips XRS700x switches that use a single byte tag trailer.
+config NET_DSA_TAG_YT921X
+ tristate "Tag driver for Motorcomm YT921x switches"
+ help
+ Say Y or M if you want to enable support for tagging frames for
+ Motorcomm YT921x switches.
+
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 8a1894a42552..42d173f5a701 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+obj-$(CONFIG_NET_DSA_TAG_MXL_GSW1XX) += tag_mxl-gsw1xx.o
obj-$(CONFIG_NET_DSA_TAG_NONE) += tag_none.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
@@ -37,7 +38,9 @@ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
obj-$(CONFIG_NET_DSA_TAG_RZN1_A5PSW) += tag_rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
+obj-$(CONFIG_NET_DSA_TAG_YT921X) += tag_yt921x.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/net/dsa/conduit.c b/net/dsa/conduit.c
index 3dfdb3cb47dc..a1b044467bd6 100644
--- a/net/dsa/conduit.c
+++ b/net/dsa/conduit.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>
+#include <net/netdev_lock.h>
#include "conduit.h"
#include "dsa.h"
@@ -25,8 +26,10 @@ static int dsa_conduit_get_regs_len(struct net_device *dev)
int ret = 0;
int len;
- if (ops->get_regs_len) {
+ if (ops && ops->get_regs_len) {
+ netdev_lock_ops(dev);
len = ops->get_regs_len(dev);
+ netdev_unlock_ops(dev);
if (len < 0)
return len;
ret += len;
@@ -56,12 +59,16 @@ static void dsa_conduit_get_regs(struct net_device *dev,
int port = cpu_dp->index;
int len;
- if (ops->get_regs_len && ops->get_regs) {
+ if (ops && ops->get_regs_len && ops->get_regs) {
+ netdev_lock_ops(dev);
len = ops->get_regs_len(dev);
- if (len < 0)
+ if (len < 0) {
+ netdev_unlock_ops(dev);
return;
+ }
regs->len = len;
ops->get_regs(dev, regs, data);
+ netdev_unlock_ops(dev);
data += regs->len;
}
@@ -80,28 +87,56 @@ static void dsa_conduit_get_regs(struct net_device *dev,
}
}
+static ssize_t dsa_conduit_append_port_stats(struct dsa_switch *ds, int port,
+ u64 *data, size_t start)
+{
+ int count;
+
+ if (!ds->ops->get_sset_count)
+ return 0;
+
+ count = ds->ops->get_sset_count(ds, port, ETH_SS_STATS);
+ if (count < 0)
+ return count;
+
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, port, data + start);
+
+ return count;
+}
+
static void dsa_conduit_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
- uint64_t *data)
+ u64 *data)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
- int port = cpu_dp->index;
- int count = 0;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ int count, mcount = 0;
- if (ops->get_sset_count && ops->get_ethtool_stats) {
- count = ops->get_sset_count(dev, ETH_SS_STATS);
+ if (ops && ops->get_sset_count && ops->get_ethtool_stats) {
+ netdev_lock_ops(dev);
+ mcount = ops->get_sset_count(dev, ETH_SS_STATS);
ops->get_ethtool_stats(dev, stats, data);
+ netdev_unlock_ops(dev);
}
- if (ds->ops->get_ethtool_stats)
- ds->ops->get_ethtool_stats(ds, port, data + count);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ count = dsa_conduit_append_port_stats(dp->ds, dp->index,
+ data, mcount);
+ if (count < 0)
+ return;
+
+ mcount += count;
+ }
}
static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
struct ethtool_stats *stats,
- uint64_t *data)
+ u64 *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -109,13 +144,15 @@ static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
int port = cpu_dp->index;
int count = 0;
- if (dev->phydev && !ops->get_ethtool_phy_stats) {
+ if (dev->phydev && (!ops || !ops->get_ethtool_phy_stats)) {
count = phy_ethtool_get_sset_count(dev->phydev);
if (count >= 0)
phy_ethtool_get_stats(dev->phydev, stats, data);
- } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
+ } else if (ops && ops->get_sset_count && ops->get_ethtool_phy_stats) {
+ netdev_lock_ops(dev);
count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
ops->get_ethtool_phy_stats(dev, stats, data);
+ netdev_unlock_ops(dev);
}
if (count < 0)
@@ -125,44 +162,84 @@ static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}
+static void dsa_conduit_append_port_sset_count(struct dsa_switch *ds, int port,
+ int sset, int *count)
+{
+ if (ds->ops->get_sset_count)
+ *count += ds->ops->get_sset_count(ds, port, sset);
+}
+
static int dsa_conduit_get_sset_count(struct net_device *dev, int sset)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
int count = 0;
+ netdev_lock_ops(dev);
if (sset == ETH_SS_PHY_STATS && dev->phydev &&
- !ops->get_ethtool_phy_stats)
+ (!ops || !ops->get_ethtool_phy_stats))
count = phy_ethtool_get_sset_count(dev->phydev);
- else if (ops->get_sset_count)
+ else if (ops && ops->get_sset_count)
count = ops->get_sset_count(dev, sset);
+ netdev_unlock_ops(dev);
if (count < 0)
count = 0;
- if (ds->ops->get_sset_count)
- count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ dsa_conduit_append_port_sset_count(dp->ds, dp->index, sset,
+ &count);
+ }
return count;
}
-static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset,
- uint8_t *data)
+static ssize_t dsa_conduit_append_port_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data,
+ size_t start)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
- const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
- int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
- int mcount = 0, count, i;
- uint8_t pfx[4];
- uint8_t *ndata;
+ u8 pfx[8], *ndata;
+ int count, i;
- snprintf(pfx, sizeof(pfx), "p%.2d", port);
+ if (!ds->ops->get_strings)
+ return 0;
+
+ snprintf(pfx, sizeof(pfx), "s%.2d_p%.2d", ds->index, port);
/* We do not want to be NULL-terminated, since this is a prefix */
pfx[sizeof(pfx) - 1] = '_';
+ ndata = data + start * len;
+ /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
+ * the output after to prepend our CPU port prefix we
+ * constructed earlier
+ */
+ ds->ops->get_strings(ds, port, stringset, ndata);
+ count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return count;
+ for (i = 0; i < count; i++) {
+ memmove(ndata + (i * len + sizeof(pfx)),
+ ndata + i * len, len - sizeof(pfx));
+ memcpy(ndata + i * len, pfx, sizeof(pfx));
+ }
+
+ return count;
+}
+
+static void dsa_conduit_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ int count, mcount = 0;
+
+ netdev_lock_ops(dev);
if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
!ops->get_ethtool_phy_stats) {
mcount = phy_ethtool_get_sset_count(dev->phydev);
@@ -176,22 +253,19 @@ static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset,
mcount = 0;
ops->get_strings(dev, stringset, data);
}
+ netdev_unlock_ops(dev);
- if (ds->ops->get_strings) {
- ndata = data + mcount * len;
- /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
- * the output after to prepend our CPU port prefix we
- * constructed earlier
- */
- ds->ops->get_strings(ds, port, stringset, ndata);
- count = ds->ops->get_sset_count(ds, port, stringset);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ count = dsa_conduit_append_port_strings(dp->ds, dp->index,
+ stringset, data,
+ mcount);
if (count < 0)
return;
- for (i = 0; i < count; i++) {
- memmove(ndata + (i * len + sizeof(pfx)),
- ndata + i * len, len - sizeof(pfx));
- memcpy(ndata + i * len, pfx, sizeof(pfx));
- }
+
+ mcount += count;
}
}
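For illustration, a minimal userspace sketch (not part of the patch) of the 8-byte prefix that dsa_conduit_append_port_strings() now prepends to every per-port counter, so that each switch/port pair in the tree is identifiable in the conduit's ethtool output; the counter name "tx_packets" is hypothetical.

#include <stdio.h>

/* Reproduce the prefix built by snprintf(pfx, sizeof(pfx), "s%.2d_p%.2d", ...)
 * above. With ds->index = 0 and port = 2 the buffer becomes "s00_p02_" (the
 * trailing NUL is overwritten with '_' because the prefix is deliberately not
 * NUL-terminated).
 */
int main(void)
{
	char pfx[8];

	snprintf(pfx, sizeof(pfx), "s%.2d_p%.2d", 0, 2);
	pfx[sizeof(pfx) - 1] = '_';

	printf("%.8s%s\n", pfx, "tx_packets");	/* prints s00_p02_tx_packets */
	return 0;
}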
diff --git a/net/dsa/devlink.c b/net/dsa/devlink.c
index 0aac887d0098..ed342f345692 100644
--- a/net/dsa/devlink.c
+++ b/net/dsa/devlink.c
@@ -182,7 +182,8 @@ static const struct devlink_ops dsa_devlink_ops = {
};
int dsa_devlink_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
@@ -229,10 +230,15 @@ int dsa_devlink_resource_register(struct dsa_switch *ds,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params)
{
- return devlink_resource_register(ds->devlink, resource_name,
- resource_size, resource_id,
- parent_resource_id,
- size_params);
+ int ret;
+
+ devl_lock(ds->devlink);
+ ret = devl_resource_register(ds->devlink, resource_name, resource_size,
+ resource_id, parent_resource_id,
+ size_params);
+ devl_unlock(ds->devlink);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
@@ -247,15 +253,19 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv)
{
- return devlink_resource_occ_get_register(ds->devlink, resource_id,
- occ_get, occ_get_priv);
+ devl_lock(ds->devlink);
+ devl_resource_occ_get_register(ds->devlink, resource_id, occ_get,
+ occ_get_priv);
+ devl_unlock(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id)
{
- devlink_resource_occ_get_unregister(ds->devlink, resource_id);
+ devl_lock(ds->devlink);
+ devl_resource_occ_get_unregister(ds->devlink, resource_id);
+ devl_unlock(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 668c729946ea..a20efabe778f 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/if_hsr.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -862,6 +863,16 @@ static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
kfree(dst->lags);
}
+static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst)
+{
+ struct dsa_link *dl, *next;
+
+ list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+ list_del(&dl->list);
+ kfree(dl);
+ }
+}
+
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
bool complete;
@@ -879,7 +890,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
err = dsa_tree_setup_cpu_ports(dst);
if (err)
- return err;
+ goto teardown_rtable;
err = dsa_tree_setup_switches(dst);
if (err)
@@ -911,14 +922,14 @@ teardown_switches:
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
+teardown_rtable:
+ dsa_tree_teardown_routing_table(dst);
return err;
}
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
- struct dsa_link *dl, *next;
-
if (!dst->setup)
return;
@@ -932,10 +943,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_cpu_ports(dst);
- list_for_each_entry_safe(dl, next, &dst->rtable, list) {
- list_del(&dl->list);
- kfree(dl);
- }
+ dsa_tree_teardown_routing_table(dst);
pr_info("DSA: tree %d torn down\n", dst->index);
@@ -1367,7 +1375,7 @@ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
return dsa_switch_parse_ports_of(ds, dn);
}
-static int dev_is_class(struct device *dev, void *class)
+static int dev_is_class(struct device *dev, const void *class)
{
if (dev->class != NULL && !strcmp(dev->class->name, class))
return 1;
@@ -1478,12 +1486,44 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
+ struct dsa_mac_addr *a, *tmp;
struct dsa_port *dp, *next;
+ struct dsa_vlan *v, *n;
dsa_switch_for_each_port_safe(dp, next, ds) {
- WARN_ON(!list_empty(&dp->fdbs));
- WARN_ON(!list_empty(&dp->mdbs));
- WARN_ON(!list_empty(&dp->vlans));
+ /* These are either entries that upper layers lost track of
+ * (probably due to bugs), or installed through interfaces
+ * where one does not necessarily have to remove them, like
+ * ndo_dflt_fdb_add().
+ */
+ list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
+ dev_info(ds->dev,
+ "Cleaning up unicast address %pM vid %u from port %d\n",
+ a->addr, a->vid, dp->index);
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
+ dev_info(ds->dev,
+ "Cleaning up multicast address %pM vid %u from port %d\n",
+ a->addr, a->vid, dp->index);
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ /* These are entries that upper layers have lost track of,
+ * probably due to bugs, but also due to dsa_port_do_vlan_del()
+ * having failed and the VLAN entry still lingering on.
+ */
+ list_for_each_entry_safe(v, n, &dp->vlans, list) {
+ dev_info(ds->dev,
+ "Cleaning up vid %u from port %d\n",
+ v->vid, dp->index);
+ list_del(&v->list);
+ kfree(v);
+ }
+
list_del(&dp->list);
kfree(dp);
}
@@ -1505,14 +1545,6 @@ static int dsa_switch_probe(struct dsa_switch *ds)
if (!ds->num_ports)
return -EINVAL;
- if (ds->phylink_mac_ops) {
- if (ds->ops->phylink_mac_select_pcs ||
- ds->ops->phylink_mac_config ||
- ds->ops->phylink_mac_link_down ||
- ds->ops->phylink_mac_link_up)
- return -EINVAL;
- }
-
if (np) {
err = dsa_switch_parse_of(ds, np);
if (err)
@@ -1577,6 +1609,7 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch);
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *conduit, *user_dev;
+ LIST_HEAD(close_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@@ -1586,10 +1619,16 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
rtnl_lock();
+ dsa_switch_for_each_cpu_port(dp, ds)
+ list_add(&dp->conduit->close_list, &close_list);
+
+ netif_close_many(&close_list, true);
+
dsa_switch_for_each_user_port(dp, ds) {
conduit = dsa_port_to_conduit(dp);
user_dev = dp->user;
+ netif_device_detach(user_dev);
netdev_upper_dev_unlink(conduit, user_dev);
}
@@ -1728,6 +1767,70 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
+/* Helpers for switches without specific HSR offloads, but which can implement
+ * NETIF_F_HW_HSR_DUP because their tagger uses dsa_xmit_port_mask()
+ */
+int dsa_port_simple_hsr_validate(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ enum hsr_port_type type;
+ int err;
+
+ err = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
+ if (err)
+ return err;
+
+ if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR slave ports can be offloaded");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_validate);
+
+int dsa_port_simple_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ int err;
+
+ err = dsa_port_simple_hsr_validate(ds, port, hsr, extack);
+ if (err)
+ return err;
+
+ dsa_hsr_foreach_port(other_dp, ds, hsr) {
+ if (other_dp != dp) {
+ dp->user->features |= NETIF_F_HW_HSR_DUP;
+ other_dp->user->features |= NETIF_F_HW_HSR_DUP;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_join);
+
+int dsa_port_simple_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+
+ dsa_hsr_foreach_port(other_dp, ds, hsr) {
+ if (other_dp != dp) {
+ dp->user->features &= ~NETIF_F_HW_HSR_DUP;
+ other_dp->user->features &= ~NETIF_F_HW_HSR_DUP;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_leave);
+
static const struct dsa_stubs __dsa_stubs = {
.conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
};
@@ -1791,3 +1894,4 @@ MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
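A hedged sketch of how a switch driver might wire up the new simple HSR helpers; this hook-up is an assumption based on the helper kernel-doc, not something shown in the patch, and the ops-struct name is invented.

#include <net/dsa.h>

/* Hypothetical driver glue (illustration only): a switch whose tagger builds
 * its destination map with dsa_xmit_port_mask() can offload HSR duplication
 * by pointing the HSR ops at the generic helpers added above.
 * dsa_port_simple_hsr_join() validates that the port is an HSR slave and
 * sets NETIF_F_HW_HSR_DUP on both slave user ports.
 */
static const struct dsa_switch_ops example_switch_ops = {
	/* ... the driver's other ops ... */
	.port_hsr_join	= dsa_port_simple_hsr_join,
	.port_hsr_leave	= dsa_port_simple_hsr_leave,
};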
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 25258b33e59e..ca3a7f52229b 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -116,19 +116,15 @@ static bool dsa_port_can_configure_learning(struct dsa_port *dp)
bool dsa_port_supports_hwtstamp(struct dsa_port *dp)
{
+ struct kernel_hwtstamp_config config = {};
struct dsa_switch *ds = dp->ds;
- struct ifreq ifr = {};
int err;
if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
return false;
- /* "See through" shim implementations of the "get" method.
- * Since we can't cook up a complete ioctl request structure, this will
- * fail in copy_to_user() with -EFAULT, which hopefully is enough to
- * detect a valid implementation.
- */
- err = ds->ops->port_hwtstamp_get(ds, dp->index, &ifr);
+ /* "See through" shim implementations of the "get" method. */
+ err = ds->ops->port_hwtstamp_get(ds, dp->index, &config);
return err != -EOPNOTSUPP;
}
@@ -1575,44 +1571,32 @@ void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
cpu_dp->tag_ops = tag_ops;
}
-static struct phylink_pcs *
-dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
- phy_interface_t interface)
+/* dsa_supports_eee - indicate that EEE is supported
+ * @ds: pointer to &struct dsa_switch
+ * @port: port index
+ *
+ * A default implementation for the .support_eee() DSA operations member,
+ * which drivers can use to indicate that they support EEE on all of their
+ * user ports.
+ *
+ * Returns: true
+ */
+bool dsa_supports_eee(struct dsa_switch *ds, int port)
{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
- struct dsa_switch *ds = dp->ds;
-
- if (ds->ops->phylink_mac_select_pcs)
- pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
-
- return pcs;
+ return true;
}
+EXPORT_SYMBOL_GPL(dsa_supports_eee);
static void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_config)
- return;
-
- ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_link_down)
- return;
-
- ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
@@ -1622,18 +1606,9 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct dsa_port *dp = dsa_phylink_to_port(config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_link_up)
- return;
-
- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
- speed, duplex, tx_pause, rx_pause);
}
static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
- .mac_select_pcs = dsa_port_phylink_mac_select_pcs,
.mac_config = dsa_port_phylink_mac_config,
.mac_link_down = dsa_port_phylink_mac_link_down,
.mac_link_up = dsa_port_phylink_mac_link_up,
@@ -1871,9 +1846,6 @@ static void dsa_shared_port_link_down(struct dsa_port *dp)
if (ds->phylink_mac_ops && ds->phylink_mac_ops->mac_link_down)
ds->phylink_mac_ops->mac_link_down(&dp->pl_config, MLO_AN_FIXED,
PHY_INTERFACE_MODE_NA);
- else if (ds->ops->phylink_mac_link_down)
- ds->ops->phylink_mac_link_down(ds, dp->index, MLO_AN_FIXED,
- PHY_INTERFACE_MODE_NA);
}
int dsa_shared_port_link_register_of(struct dsa_port *dp)
@@ -1937,6 +1909,9 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
struct dsa_switch *ds = dp->ds;
int err;
+ if (!dp->hsr_dev)
+ return;
+
dp->hsr_dev = NULL;
if (ds->ops->port_hsr_leave) {
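As the kernel-doc above states, dsa_supports_eee() is intended as a default implementation of the .support_eee() operation; a hypothetical driver hook-up (illustration only, not from the patch) would look like:

#include <net/dsa.h>

/* Advertise EEE support on all user ports by reusing the generic helper. */
static const struct dsa_switch_ops example_eee_ops = {
	/* ... the driver's other ops ... */
	.support_eee	= dsa_supports_eee,
};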
diff --git a/net/dsa/tag.c b/net/dsa/tag.c
index 6e402d49afd3..79ad105902d9 100644
--- a/net/dsa/tag.c
+++ b/net/dsa/tag.c
@@ -105,8 +105,9 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
p = netdev_priv(skb->dev);
- if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
- nskb = dsa_untag_bridge_pvid(skb);
+ if (unlikely(cpu_dp->ds->untag_bridge_pvid ||
+ cpu_dp->ds->untag_vlan_aware_bridge_pvid)) {
+ nskb = dsa_software_vlan_untag(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index f6b9c73718df..cf52283fe9df 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -44,46 +44,81 @@ static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
return NULL;
}
-/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
- * frames as untagged, since the bridge will not untag them.
+/**
+ * dsa_software_untag_vlan_aware_bridge: Software untagging for VLAN-aware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge can process tagged packets. Software like STP/PTP may not. The
+ * bridge can also process untagged packets, to the same effect as if they were
+ * tagged with the PVID of the ingress port. So packets tagged with the PVID of
+ * the bridge port must be software-untagged, to support both use cases.
*/
-static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+static inline void dsa_software_untag_vlan_aware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
{
- struct dsa_port *dp = dsa_user_to_port(skb->dev);
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct net_device *dev = skb->dev;
- struct net_device *upper_dev;
- u16 vid, pvid, proto;
+ u16 pvid, proto;
int err;
- if (!br || br_vlan_enabled(br))
- return skb;
-
err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- /* Move VLAN tag from data to hwaccel */
- if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
- skb = skb_vlan_untag(skb);
- if (!skb)
- return NULL;
- }
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
- if (!skb_vlan_tag_present(skb))
- return skb;
+ if (vid == pvid && skb->vlan_proto == htons(proto))
+ __vlan_hwaccel_clear_tag(skb);
+}
- vid = skb_vlan_tag_get_id(skb);
+/**
+ * dsa_software_untag_vlan_unaware_bridge: Software untagging for VLAN-unaware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge ignores all VLAN tags. Software like STP/PTP may not (it may run
+ * on the plain port, or on a VLAN upper interface). Maybe packets are coming
+ * to software as tagged with a driver-defined VID which is NOT equal to the
+ * PVID of the bridge port (since the bridge is VLAN-unaware, its configuration
+ * should NOT be committed to hardware). DSA needs a method for this private
+ * VID to be communicated by software to it, and if packets are tagged with it,
+ * software-untag them. Note: the private VID may be different per bridge, to
+ * support the FDB isolation use case.
+ *
+ * FIXME: this is currently implemented based on the broken assumption that
+ * the "private VID" used by the driver in VLAN-unaware mode is equal to the
+ * bridge PVID. It should not be, except for a coincidence; the bridge PVID is
+ * irrelevant to the data path in the VLAN-unaware mode. Thus, the VID that
+ * this function removes is wrong.
+ *
+ * All users of ds->untag_bridge_pvid should fix their drivers, if necessary,
+ * to make the two independent. Only then, if there still remains a need to
+ * strip the private VID from packets, then a new ds->ops->get_private_vid()
+ * API shall be introduced to communicate to DSA what this VID is, which needs
+ * to be stripped here.
+ */
+static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
+{
+ struct net_device *upper_dev;
+ u16 pvid, proto;
+ int err;
- /* We already run under an RCU read-side critical section since
- * we are called from netif_receive_skb_list_internal().
- */
- err = br_vlan_get_pvid_rcu(dev, &pvid);
+ err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- if (vid != pvid)
- return skb;
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
+
+ if (vid != pvid || skb->vlan_proto != htons(proto))
+ return;
/* The sad part about attempting to untag from DSA is that we
* don't know, unless we check, if the skb will end up in
@@ -95,10 +130,56 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
* definitely keep the tag, to make sure it keeps working.
*/
upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
- if (upper_dev)
+ if (!upper_dev)
+ __vlan_hwaccel_clear_tag(skb);
+}
+
+/**
+ * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Receive path method for switches which send some packets as VLAN-tagged
+ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
+ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
+ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
+ *
+ * As a side effect of this method, any VLAN tag from the skb head is moved
+ * to hwaccel.
+ */
+static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u16 vid, proto;
+ int err;
+
+ /* software untagging for standalone ports not yet necessary */
+ if (!br)
+ return skb;
+
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
return skb;
- __vlan_hwaccel_clear_tag(skb);
+ vid = skb_vlan_tag_get_id(skb);
+
+ if (br_vlan_enabled(br)) {
+ if (dp->ds->untag_vlan_aware_bridge_pvid)
+ dsa_software_untag_vlan_aware_bridge(skb, br, vid);
+ } else {
+ if (dp->ds->untag_bridge_pvid)
+ dsa_software_untag_vlan_unaware_bridge(skb, br, vid);
+ }
return skb;
}
@@ -238,6 +319,24 @@ static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
return skb->data + 2 * ETH_ALEN;
}
+static inline unsigned long dsa_xmit_port_mask(const struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ unsigned long mask = BIT(dp->index);
+
+ if (IS_ENABLED(CONFIG_HSR) &&
+ unlikely(dev->features & NETIF_F_HW_HSR_DUP)) {
+ struct net_device *hsr_dev = dp->hsr_dev;
+ struct dsa_port *other_dp;
+
+ dsa_hsr_foreach_port(other_dp, dp->ds, hsr_dev)
+ mask |= BIT(other_dp->index);
+ }
+
+ return mask;
+}
+
/* Create 2 modaliases per tagging protocol, one to auto-load the module
* given the ID reported by get_tag_protocol(), and the other by name.
*/
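A minimal sketch (assuming a hypothetical one-byte tail tagger with .needed_tailroom = 1) of how the transmit side consumes the new dsa_xmit_port_mask() helper; the real conversions are visible in the tag_hellcreek.c, tag_ksz.c and tag_gswip.c hunks further down.

#include <linux/skbuff.h>
#include <net/dsa.h>

#include "tag.h"

/* Hypothetical trailer tagger (illustration only). For ordinary traffic the
 * mask is just BIT(dp->index); when the user port is an offloaded HSR slave
 * (NETIF_F_HW_HSR_DUP), the mask also includes the other slave port so the
 * switch duplicates the frame in hardware.
 */
static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	u8 *tag = skb_put(skb, 1);

	*tag = dsa_xmit_port_mask(skb, dev);

	return skb;
}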
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 71b26ae6db39..53e03fd8071b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -197,7 +197,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
if (err) {
- refcount_inc(&v->refcount);
+ refcount_set(&v->refcount, 1);
return err;
}
@@ -286,7 +286,8 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
* be used for VLAN-unaware bridging.
*/
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port);
u16 standalone_vid, bridge_vid;
@@ -304,6 +305,8 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false);
+ *tx_fwd_offload = true;
+
return 0;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join);
@@ -468,8 +471,8 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
- int vbid)
+static struct net_device *
+dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid)
{
struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
@@ -495,30 +498,91 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
return NULL;
}
-EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid)
+{
+ /* Always prefer precise source port information, if available */
+ if (source_port != -1 && switch_id != -1)
+ return dsa_conduit_find_user(conduit, switch_id, source_port);
+ else if (vbid >= 1)
+ return dsa_tag_8021q_find_port_by_vbid(conduit, vbid);
+
+ return dsa_find_designated_bridge_port_by_vid(conduit, vid);
+}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_user);
+
+/**
+ * dsa_8021q_rcv - Decode source information from tag_8021q header
+ * @skb: RX socket buffer
+ * @source_port: pointer to storage for precise source port information.
+ * If this is known already from outside tag_8021q, the pre-initialized
+ * value is preserved. If not known, pass -1.
+ * @switch_id: similar to source_port.
+ * @vbid: pointer to storage for imprecise bridge ID. Must be pre-initialized
+ * with -1. If a positive value is returned, the source_port and switch_id
+ * are invalid.
+ * @vid: pointer to storage for original VID, in case tag_8021q decoding failed.
+ *
+ * If the packet has a tag_8021q header, decode it and set @source_port,
+ * @switch_id and @vbid, and strip the header. Otherwise set @vid and keep the
+ * header in the hwaccel area of the packet.
+ */
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid)
+ int *vbid, int *vid)
{
- u16 vid, tci;
+ int tmp_source_port, tmp_switch_id, tmp_vbid;
+ __be16 vlan_proto;
+ u16 tmp_vid, tci;
if (skb_vlan_tag_present(skb)) {
+ vlan_proto = skb->vlan_proto;
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
+ struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+
+ vlan_proto = hdr->h_vlan_proto;
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
skb_pull_rcsum(skb, ETH_HLEN);
}
- vid = tci & VLAN_VID_MASK;
+ tmp_vid = tci & VLAN_VID_MASK;
+ if (!vid_is_dsa_8021q(tmp_vid)) {
+ /* Not a tag_8021q frame, so return the VID to the
+ * caller for further processing, and put the tag back
+ */
+ if (vid)
+ *vid = tmp_vid;
+
+ __vlan_hwaccel_put_tag(skb, vlan_proto, tci);
+
+ return;
+ }
- *source_port = dsa_8021q_rx_source_port(vid);
- *switch_id = dsa_8021q_rx_switch_id(vid);
+ tmp_source_port = dsa_8021q_rx_source_port(tmp_vid);
+ tmp_switch_id = dsa_8021q_rx_switch_id(tmp_vid);
+ tmp_vbid = dsa_tag_8021q_rx_vbid(tmp_vid);
+
+ /* Precise source port information is unknown when receiving from a
+ * VLAN-unaware bridging domain, and tmp_source_port and tmp_switch_id
+ * are zeroes in this case.
+ *
+ * Preserve the source information from hardware-specific mechanisms,
+ * if available. This allows us to not overwrite a valid source port
+ * and switch ID with less precise values.
+ */
+ if (tmp_vbid == 0 && *source_port == -1)
+ *source_port = tmp_source_port;
+ if (tmp_vbid == 0 && *switch_id == -1)
+ *switch_id = tmp_switch_id;
if (vbid)
- *vbid = dsa_tag_8021q_rx_vbid(vid);
+ *vbid = tmp_vbid;
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ return;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
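A hedged sketch of the receive-path pattern that the reworked dsa_8021q_rcv() enables for its callers (compare the tag_ocelot_8021q.c and tag_sja1105.c hunks below); the function name here is invented.

#include <linux/skbuff.h>

#include "tag.h"
#include "tag_8021q.h"

/* Illustrative only. Every output is pre-initialized to -1 ("unknown"),
 * dsa_8021q_rcv() fills in whatever the tag_8021q VLAN encodes, and
 * dsa_tag_8021q_find_user() then picks the most precise lookup available:
 * exact switch/port, imprecise bridge ID (vbid), or designated bridge port
 * by VID.
 */
static struct sk_buff *example_8021q_rcv(struct sk_buff *skb,
					 struct net_device *conduit)
{
	int src_port = -1, switch_id = -1, vbid = -1, vid = -1;

	dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid);

	skb->dev = dsa_tag_8021q_find_user(conduit, src_port, switch_id,
					   vid, vbid);
	if (!skb->dev)
		return NULL;

	dsa_default_offload_fwd_mark(skb);

	return skb;
}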
diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h
index 41f7167ac520..27b8906f99ec 100644
--- a/net/dsa/tag_8021q.h
+++ b/net/dsa/tag_8021q.h
@@ -14,10 +14,11 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid);
+ int *vbid, int *vid);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
- int vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info);
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 8c3c068728e5..cf9420439054 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -15,6 +15,7 @@
#define BRCM_NAME "brcm"
#define BRCM_LEGACY_NAME "brcm-legacy"
+#define BRCM_LEGACY_FCS_NAME "brcm-legacy-fcs"
#define BRCM_PREPEND_NAME "brcm-prepend"
/* Legacy Broadcom tag (6 bytes) */
@@ -32,6 +33,10 @@
#define BRCM_LEG_MULTICAST (1 << 5)
#define BRCM_LEG_EGRESS (2 << 5)
#define BRCM_LEG_INGRESS (3 << 5)
+#define BRCM_LEG_LEN_HI(x) (((x) >> 8) & 0x7)
+
+/* 4th byte in the tag */
+#define BRCM_LEG_LEN_LO(x) ((x) & 0xff)
/* 6th byte in the tag */
#define BRCM_LEG_PORT_ID (0xf)
@@ -87,6 +92,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
{
struct dsa_port *dp = dsa_user_to_port(dev);
u16 queue = skb_get_queue_mapping(skb);
+ u16 port_mask;
u8 *brcm_tag;
/* The Ethernet switch we are interfaced with needs packets to be at
@@ -114,10 +120,9 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) |
((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);
brcm_tag[1] = 0;
- brcm_tag[2] = 0;
- if (dp->index == 8)
- brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
- brcm_tag[3] = (1 << dp->index) & BRCM_IG_DSTMAP1_MASK;
+ port_mask = dsa_xmit_port_mask(skb, dev);
+ brcm_tag[2] = (port_mask >> 8) & BRCM_IG_DSTMAP2_MASK;
+ brcm_tag[3] = port_mask & BRCM_IG_DSTMAP1_MASK;
/* Now tell the conduit network device about the desired output queue
* as well
@@ -171,7 +176,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_TAG_LEN);
- dsa_default_offload_fwd_mark(skb);
+ if (likely(!is_link_local_ether_addr(eth_hdr(skb)->h_dest)))
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
@@ -212,6 +218,48 @@ DSA_TAG_DRIVER(brcm_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) || \
+ IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int len = BRCM_LEG_TAG_LEN;
+ int source_port;
+ __be16 *proto;
+ u8 *brcm_tag;
+
+ if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ return NULL;
+
+ brcm_tag = dsa_etype_header_pos_rx(skb);
+ proto = (__be16 *)(brcm_tag + BRCM_LEG_TAG_LEN);
+
+ source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /* The internal switch in BCM63XX SoCs always tags on egress on the CPU
+ * port. We use VID 0 internally for untagged traffic, so strip the tag
+ * if the TCI field is all 0, and keep it otherwise to also retain
+ * e.g. 802.1p tagged packets.
+ */
+ if (proto[0] == htons(ETH_P_8021Q) && proto[1] == 0)
+ len += VLAN_HLEN;
+
+ /* Remove Broadcom tag and update checksum */
+ skb_pull_rcsum(skb, len);
+
+ if (likely(!is_link_local_ether_addr(eth_hdr(skb)->h_dest)))
+ dsa_default_offload_fwd_mark(skb);
+
+ dsa_strip_etype_header(skb, len);
+
+ return skb;
+}
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY || CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
+
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -250,49 +298,77 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
return skb;
}
-static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
- struct net_device *dev)
+static const struct dsa_device_ops brcm_legacy_netdev_ops = {
+ .name = BRCM_LEGACY_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY,
+ .xmit = brcm_leg_tag_xmit,
+ .rcv = brcm_leg_tag_rcv,
+ .needed_headroom = BRCM_LEG_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_fcs_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- int len = BRCM_LEG_TAG_LEN;
- int source_port;
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ unsigned int fcs_len;
+ __le32 fcs_val;
u8 *brcm_tag;
- if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID)))
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 70 bytes (including FCS
+ * and tag) because the length verification is done after the Broadcom
+ * tag is stripped off the ingress packet.
+ *
+ * Let dsa_user_xmit() free the SKB.
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
- brcm_tag = dsa_etype_header_pos_rx(skb);
+ fcs_len = skb->len;
+ fcs_val = cpu_to_le32(crc32_le(~0, skb->data, fcs_len) ^ ~0);
- source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+ skb_push(skb, BRCM_LEG_TAG_LEN);
- skb->dev = dsa_conduit_find_user(dev, 0, source_port);
- if (!skb->dev)
- return NULL;
+ dsa_alloc_etype_header(skb, BRCM_LEG_TAG_LEN);
- /* VLAN tag is added by BCM63xx internal switch */
- if (netdev_uses_dsa(skb->dev))
- len += VLAN_HLEN;
+ brcm_tag = skb->data + 2 * ETH_ALEN;
- /* Remove Broadcom tag and update checksum */
- skb_pull_rcsum(skb, len);
+ /* Broadcom tag type */
+ brcm_tag[0] = BRCM_LEG_TYPE_HI;
+ brcm_tag[1] = BRCM_LEG_TYPE_LO;
- dsa_default_offload_fwd_mark(skb);
+ /* Broadcom tag value */
+ brcm_tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(fcs_len);
+ brcm_tag[3] = BRCM_LEG_LEN_LO(fcs_len);
+ brcm_tag[4] = 0;
+ brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID;
- dsa_strip_etype_header(skb, len);
+ /* Original FCS value */
+ if (__skb_pad(skb, ETH_FCS_LEN, false))
+ return NULL;
+ skb_put_data(skb, &fcs_val, ETH_FCS_LEN);
return skb;
}
-static const struct dsa_device_ops brcm_legacy_netdev_ops = {
- .name = BRCM_LEGACY_NAME,
- .proto = DSA_TAG_PROTO_BRCM_LEGACY,
- .xmit = brcm_leg_tag_xmit,
+static const struct dsa_device_ops brcm_legacy_fcs_netdev_ops = {
+ .name = BRCM_LEGACY_FCS_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY_FCS,
+ .xmit = brcm_leg_fcs_tag_xmit,
.rcv = brcm_leg_tag_rcv,
.needed_headroom = BRCM_LEG_TAG_LEN,
};
-DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
-#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+DSA_TAG_DRIVER(brcm_legacy_fcs_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY_FCS, BRCM_LEGACY_FCS_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
@@ -328,6 +404,9 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
&DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops),
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+ &DSA_TAG_DRIVER_NAME(brcm_legacy_fcs_netdev_ops),
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
#endif
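For clarity, a hedged reading of the frame that brcm_leg_fcs_tag_xmit() builds, reconstructed purely from the code above rather than from hardware documentation:

/*
 * DA (6) | SA (6) | legacy tag (6) | original EtherType + payload | old FCS (4)
 *
 * where the six tag bytes are:
 *   tag[0..1]  BRCM_LEG_TYPE_HI / BRCM_LEG_TYPE_LO
 *   tag[2]     BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(len)
 *   tag[3]     BRCM_LEG_LEN_LO(len)
 *   tag[4]     0
 *   tag[5]     egress port index & BRCM_LEG_PORT_ID
 *
 * len is the padded frame length before the tag was inserted, and "old FCS"
 * is the CRC32 of that untagged frame, appended because BCM53xx legacy
 * switches expect the original FCS and length to be present (see the new
 * NET_DSA_TAG_BRCM_LEGACY_FCS Kconfig help text).
 */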
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
index 51a1f46a567f..5fa436121087 100644
--- a/net/dsa/tag_gswip.c
+++ b/net/dsa/tag_gswip.c
@@ -48,8 +48,7 @@
/* Byte 3 */
#define GSWIP_TX_DPID_EN BIT(0)
-#define GSWIP_TX_PORT_MAP_SHIFT 1
-#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
+#define GSWIP_TX_PORT_MAP GENMASK(6, 1)
#define GSWIP_RX_HEADER_LEN 8
@@ -61,7 +60,6 @@
static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
u8 *gswip_tag;
skb_push(skb, GSWIP_TX_HEADER_LEN);
@@ -70,7 +68,7 @@ static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
gswip_tag[0] = GSWIP_TX_SLPID_CPU;
gswip_tag[1] = GSWIP_TX_DPID_ELAN;
gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
- gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
+ gswip_tag[3] = FIELD_PREP(GSWIP_TX_PORT_MAP, dsa_xmit_port_mask(skb, dev));
gswip_tag[3] |= GSWIP_TX_DPID_EN;
return skb;
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index 663b25785d95..544ab15685a2 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -20,7 +20,6 @@
static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
u8 *tag;
/* Calculate checksums (if required) before adding the trailer tag to
@@ -33,7 +32,7 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
/* Tag encoding */
tag = skb_put(skb, HELLCREEK_TAG_LEN);
- *tag = BIT(dp->index);
+ *tag = dsa_xmit_port_mask(skb, dev);
return skb;
}
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index ee7b272ab715..9170a0148cc4 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -66,7 +66,7 @@ static int ksz_connect(struct dsa_switch *ds)
if (!priv)
return -ENOMEM;
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
ret = PTR_ERR(xmit_worker);
@@ -111,15 +111,15 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* tag0 : zero-based value represents port
- * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ * (eg, 0x0=port1, 0x2=port3, 0x3=port4)
*/
+#define KSZ8795_TAIL_TAG_EG_PORT_M GENMASK(1, 0)
#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6)
#define KSZ8795_TAIL_TAG_LOOKUP BIT(7)
static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
struct ethhdr *hdr;
u8 *tag;
@@ -130,7 +130,7 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
hdr = skb_eth_hdr(skb);
- *tag = 1 << dp->index;
+ *tag = dsa_xmit_port_mask(skb, dev);
if (is_link_local_ether_addr(hdr->h_dest))
*tag |= KSZ8795_TAIL_TAG_OVERRIDE;
@@ -139,9 +139,15 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
{
- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ u8 *tag;
+
+ if (skb_linearize(skb))
+ return NULL;
- return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
+ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+
+ return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M,
+ KSZ_EGRESS_TAG_LEN);
}
static const struct dsa_device_ops ksz8795_netdev_ops = {
@@ -176,8 +182,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
-#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
@@ -286,21 +293,12 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
hdr = skb_eth_hdr(skb);
- val = BIT(dp->index);
-
+ val = dsa_xmit_port_mask(skb, dev);
val |= FIELD_PREP(KSZ9477_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
val |= KSZ9477_TAIL_TAG_OVERRIDE;
- if (dev->features & NETIF_F_HW_HSR_DUP) {
- struct net_device *hsr_dev = dp->hsr_dev;
- struct dsa_port *other_dp;
-
- dsa_hsr_foreach_port(other_dp, dp->ds, hsr_dev)
- val |= BIT(other_dp->index);
- }
-
*tag = cpu_to_be16(val);
return ksz_defer_xmit(dp, skb);
@@ -308,10 +306,16 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
- /* Tag decoding */
- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- unsigned int port = tag[0] & 7;
unsigned int len = KSZ_EGRESS_TAG_LEN;
+ unsigned int port;
+ u8 *tag;
+
+ if (skb_linearize(skb))
+ return NULL;
+
+ /* Tag decoding */
+ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
/* Extra 4-bytes PTP timestamp */
if (tag[0] & KSZ9477_PTP_TAG_INDICATION) {
@@ -357,8 +361,7 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
hdr = skb_eth_hdr(skb);
- *tag = BIT(dp->index);
-
+ *tag = dsa_xmit_port_mask(skb, dev);
*tag |= FIELD_PREP(KSZ9893_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
@@ -422,8 +425,7 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
tag = skb_put(skb, LAN937X_EGRESS_TAG_LEN);
- val = BIT(dp->index);
-
+ val = dsa_xmit_port_mask(skb, dev);
val |= FIELD_PREP(LAN937X_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index b670e3c53e91..dea3eecaf093 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -54,7 +54,8 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
* whether that's a combined special tag with 802.1Q header.
*/
mtk_tag[0] = xmit_tpid;
- mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
+ mtk_tag[1] = FIELD_PREP(MTK_HDR_XMIT_DP_BIT_MASK,
+ dsa_xmit_port_mask(skb, dev));
/* Tag control information is kept for 802.1Q */
if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
diff --git a/net/dsa/tag_mxl-gsw1xx.c b/net/dsa/tag_mxl-gsw1xx.c
new file mode 100644
index 000000000000..60f7c445e656
--- /dev/null
+++ b/net/dsa/tag_mxl-gsw1xx.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DSA driver Special Tag support for MaxLinear GSW1xx switch chips
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+#include "tag.h"
+
+/* To define the outgoing port and to discover the incoming port a special
+ * tag is used by the GSW1xx.
+ *
+ * Dest MAC Src MAC special TAG EtherType
+ * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 5 6 7 8 | 1 2 |...
+ * |<--------------->|
+ */
+
+#define GSW1XX_TAG_NAME "gsw1xx"
+
+/* special tag header length (RX and TX) */
+#define GSW1XX_HEADER_LEN 8
+
+/* Word 0 = Ethertype -> 0x88C3 */
+
+/* Word 1 */
+#define GSW1XX_TX_PORT_MAP GENMASK(7, 0)
+#define GSW1XX_TX_PORT_MAP_EN BIT(15)
+#define GSW1XX_TX_CLASS_EN BIT(14)
+#define GSW1XX_TX_TIME_STAMP_EN BIT(13)
+#define GSW1XX_TX_LRN_DIS BIT(12)
+#define GSW1XX_TX_CLASS GENMASK(11, 8)
+
+/* special tag in RX path header */
+/* Word 2 */
+#define GSW1XX_RX_PORT_MAP GENMASK(15, 8)
+
+static struct sk_buff *gsw1xx_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ __be16 *gsw1xx_tag;
+ u16 tag;
+
+ /* provide additional space 'GSW1XX_HEADER_LEN' bytes */
+ skb_push(skb, GSW1XX_HEADER_LEN);
+
+ /* add space between MAC address and Ethertype */
+ dsa_alloc_etype_header(skb, GSW1XX_HEADER_LEN);
+
+ /* special tag ingress */
+ gsw1xx_tag = dsa_etype_header_pos_tx(skb);
+ gsw1xx_tag[0] = htons(ETH_P_MXLGSW);
+
+ tag = FIELD_PREP(GSW1XX_TX_PORT_MAP, dsa_xmit_port_mask(skb, dev)) |
+ GSW1XX_TX_PORT_MAP_EN | GSW1XX_TX_LRN_DIS;
+ gsw1xx_tag[1] = htons(tag);
+ gsw1xx_tag[2] = 0;
+ gsw1xx_tag[3] = 0;
+
+ return skb;
+}
+
+static struct sk_buff *gsw1xx_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int port;
+ __be16 *gsw1xx_tag;
+
+ if (unlikely(!pskb_may_pull(skb, GSW1XX_HEADER_LEN))) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet, cannot pull SKB\n");
+ return NULL;
+ }
+
+ gsw1xx_tag = dsa_etype_header_pos_rx(skb);
+
+ if (unlikely(ntohs(gsw1xx_tag[0]) != ETH_P_MXLGSW)) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid special tag\n");
+ dev_warn_ratelimited(&dev->dev, "Tag: %8ph\n", gsw1xx_tag);
+ return NULL;
+ }
+
+ /* Get source port information */
+ port = FIELD_GET(GSW1XX_RX_PORT_MAP, ntohs(gsw1xx_tag[1]));
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
+ dev_warn_ratelimited(&dev->dev, "Tag: %8ph\n", gsw1xx_tag);
+ return NULL;
+ }
+
+ /* remove the GSW1xx special tag between MAC addresses and the current
+ * ethertype field.
+ */
+ skb_pull_rcsum(skb, GSW1XX_HEADER_LEN);
+ dsa_strip_etype_header(skb, GSW1XX_HEADER_LEN);
+
+ return skb;
+}
+
+static const struct dsa_device_ops gsw1xx_netdev_ops = {
+ .name = GSW1XX_TAG_NAME,
+ .proto = DSA_TAG_PROTO_MXL_GSW1XX,
+ .xmit = gsw1xx_tag_xmit,
+ .rcv = gsw1xx_tag_rcv,
+ .needed_headroom = GSW1XX_HEADER_LEN,
+};
+
+MODULE_DESCRIPTION("DSA tag driver for MaxLinear GSW1xx 8 byte protocol");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_MXL_GSW1XX, GSW1XX_TAG_NAME);
+
+module_dsa_tag_driver(gsw1xx_netdev_ops);
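A worked example (illustrative arithmetic only) of the TX tag word produced by the new gsw1xx_tag_xmit():

/* For a frame sent out user port 2 with no HSR duplication,
 * dsa_xmit_port_mask() returns BIT(2) = 0x04, so word 1 of the special tag is
 *
 *   FIELD_PREP(GSW1XX_TX_PORT_MAP, 0x04) | GSW1XX_TX_PORT_MAP_EN |
 *   GSW1XX_TX_LRN_DIS
 *     = 0x0004 | 0x8000 | 0x1000
 *     = 0x9004
 *
 * transmitted as htons(0x9004) right after the 0x88C3 special-tag EtherType;
 * words 2 and 3 stay zero.
 */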
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index e0e4300bfbd3..3405def79c2d 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -8,40 +8,6 @@
#define OCELOT_NAME "ocelot"
#define SEVILLE_NAME "seville"
-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
- * payload and move it into the DSA tag, which will make the switch classify
- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
- * which is the pvid of standalone and VLAN-unaware bridge ports.
- */
-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
- u64 *vlan_tci, u64 *tag_type)
-{
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct vlan_ethhdr *hdr;
- u16 proto, tci;
-
- if (!br || !br_vlan_enabled(br)) {
- *vlan_tci = 0;
- *tag_type = IFH_TAG_TYPE_C;
- return;
- }
-
- hdr = skb_vlan_eth_hdr(skb);
- br_vlan_get_proto(br, &proto);
-
- if (ntohs(hdr->h_vlan_proto) == proto) {
- vlan_remove_tag(skb, &tci);
- *vlan_tci = tci;
- } else {
- rcu_read_lock();
- br_vlan_get_pvid_rcu(br, &tci);
- rcu_read_unlock();
- *vlan_tci = tci;
- }
-
- *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
-}
-
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
@@ -53,7 +19,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
u32 rew_op = 0;
u64 qos_class;
- ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+ ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci,
+ &tag_type);
qos_class = netdev_get_num_tc(netdev) ?
netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
@@ -79,11 +46,10 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_user_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x8880000a), &injection);
- ocelot_ifh_set_dest(injection, BIT_ULL(dp->index));
+ ocelot_ifh_set_dest(injection, dsa_xmit_port_mask(skb, netdev));
return skb;
}
@@ -91,11 +57,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *seville_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_user_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x88800005), &injection);
- seville_ifh_set_dest(injection, BIT_ULL(dp->index));
+ seville_ifh_set_dest(injection, dsa_xmit_port_mask(skb, netdev));
return skb;
}
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index b059381310fe..3929584791e4 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -79,9 +79,9 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int src_port, switch_id;
+ int src_port = -1, switch_id = -1;
- dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
+ dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL);
skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port);
if (!skb->dev)
@@ -110,7 +110,7 @@ static int ocelot_connect(struct dsa_switch *ds)
if (!priv)
return -ENOMEM;
- priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ priv->xmit_worker = kthread_run_worker(0, "felix_xmit");
if (IS_ERR(priv->xmit_worker)) {
err = PTR_ERR(priv->xmit_worker);
kfree(priv);
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 0cf61286b426..6d56a28c914c 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -14,7 +14,6 @@
static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
__be16 *phdr;
u16 hdr;
@@ -26,7 +25,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set the version field, and set destination port information */
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= QCA_HDR_XMIT_FROM_CPU;
- hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(dp->index));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, dsa_xmit_port_mask(skb, dev));
*phdr = htons(hdr);
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index feaefa0e179b..3cc63eacfa03 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -57,7 +57,7 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT);
/* The lower bits indicate the port number */
- out |= BIT(dp->index);
+ out |= dsa_xmit_port_mask(skb, dev);
p = (__be16 *)(tag + 2);
*p = htons(out);
diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c
index 15c2bae2b429..2464545da4d2 100644
--- a/net/dsa/tag_rtl8_4.c
+++ b/net/dsa/tag_rtl8_4.c
@@ -103,7 +103,6 @@
static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev,
void *tag)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
__be16 tag16[RTL8_4_TAG_LEN / 2];
/* Set Realtek EtherType */
@@ -116,7 +115,7 @@ static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev,
tag16[2] = htons(FIELD_PREP(RTL8_4_LEARN_DIS, 1));
/* Zero ALLOW; set RX (CPU->switch) forwarding port mask */
- tag16[3] = htons(FIELD_PREP(RTL8_4_RX, BIT(dp->index)));
+ tag16[3] = htons(FIELD_PREP(RTL8_4_RX, dsa_xmit_port_mask(skb, dev)));
memcpy(tag, tag16, RTL8_4_TAG_LEN);
}
diff --git a/net/dsa/tag_rzn1_a5psw.c b/net/dsa/tag_rzn1_a5psw.c
index 69d51221b1e5..10994b3470f6 100644
--- a/net/dsa/tag_rzn1_a5psw.c
+++ b/net/dsa/tag_rzn1_a5psw.c
@@ -39,7 +39,6 @@ struct a5psw_tag {
static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
struct a5psw_tag *ptag;
u32 data2_val;
@@ -60,7 +59,7 @@ static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *de
ptag = dsa_etype_header_pos_tx(skb);
- data2_val = FIELD_PREP(A5PSW_CTRL_DATA_PORT, BIT(dp->index));
+ data2_val = FIELD_PREP(A5PSW_CTRL_DATA_PORT, dsa_xmit_port_mask(skb, dev));
ptag->ctrl_tag = htons(ETH_P_DSA_A5PSW);
ptag->ctrl_data = htons(A5PSW_CTRL_DATA_FORCE_FORWARD);
ptag->ctrl_data2_lo = htons(data2_val);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 1aba1d05c27a..02adec693811 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -472,37 +472,14 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}
-/* If the VLAN in the packet is a tag_8021q one, set @source_port and
- * @switch_id and strip the header. Otherwise set @vid and keep it in the
- * packet.
- */
-static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
- int *switch_id, int *vbid, u16 *vid)
-{
- struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
- u16 vlan_tci;
-
- if (skb_vlan_tag_present(skb))
- vlan_tci = skb_vlan_tag_get(skb);
- else
- vlan_tci = ntohs(hdr->h_vlan_TCI);
-
- if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
- return dsa_8021q_rcv(skb, source_port, switch_id, vbid);
-
- /* Try our best with imprecise RX */
- *vid = vlan_tci & VLAN_VID_MASK;
-}
-
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
struct sja1105_meta meta = {0};
struct ethhdr *hdr;
bool is_link_local;
bool is_meta;
- u16 vid;
hdr = eth_hdr(skb);
is_link_local = sja1105_is_link_local(skb);
@@ -524,37 +501,16 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
/* Normal data plane traffic and link-local frames are tagged with
* a tag_8021q VLAN which we have to strip
*/
- if (sja1105_skb_has_tag_8021q(skb)) {
- int tmp_source_port = -1, tmp_switch_id = -1;
-
- sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid,
- &vid);
- /* Preserve the source information from the INCL_SRCPT option,
- * if available. This allows us to not overwrite a valid source
- * port and switch ID with zeroes when receiving link-local
- * frames from a VLAN-unaware bridged port (non-zero vbid) or a
- * VLAN-aware bridged port (non-zero vid). Furthermore, the
- * tag_8021q source port information is only of trust when the
- * vbid is 0 (precise port). Otherwise, tmp_source_port and
- * tmp_switch_id will be zeroes.
- */
- if (vbid == 0 && source_port == -1)
- source_port = tmp_source_port;
- if (vbid == 0 && switch_id == -1)
- switch_id = tmp_switch_id;
- } else if (source_port == -1 && switch_id == -1) {
+ if (sja1105_skb_has_tag_8021q(skb))
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+ else if (source_port == -1 && switch_id == -1)
/* Packets with no source information have no chance of
* getting accepted, drop them straight away.
*/
return NULL;
- }
- if (source_port != -1 && switch_id != -1)
- skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
- else if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -677,9 +633,8 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
bool host_only = false;
- u16 vid = 0;
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
@@ -691,14 +646,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
-
- if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else if (source_port == -1 || switch_id == -1)
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
- else
- skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
+
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -755,7 +707,7 @@ static int sja1105_connect(struct dsa_switch *ds)
spin_lock_init(&priv->meta_lock);
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
err = PTR_ERR(xmit_worker);
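
The two receive paths above now defer the user netdev lookup to dsa_tag_8021q_find_user(). Below is a minimal sketch of such a consolidated lookup, inferred purely from the open-coded dispatch removed in sja1105_rcv() and sja1110_rcv(); the function name is hypothetical and the real implementation in tag_8021q.c may differ.

static struct net_device *
example_tag_8021q_find_user(struct net_device *conduit, int source_port,
			    int switch_id, int vid, int vbid)
{
	/* Precise source information wins: map (switch_id, port) directly
	 * to the user netdev behind this conduit.
	 */
	if (source_port != -1 && switch_id != -1)
		return dsa_conduit_find_user(conduit, switch_id, source_port);

	/* Otherwise fall back to the VLAN-unaware bridge ID... */
	if (vbid >= 1)
		return dsa_tag_8021q_find_port_by_vbid(conduit, vbid);

	/* ...and finally to imprecise RX on the designated bridge port
	 * for this VLAN ID.
	 */
	return dsa_find_designated_bridge_port_by_vid(conduit, vid);
}
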
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 22742a53d6f4..4dce24cfe6a7 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -14,12 +14,11 @@
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_user_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 4);
trailer[0] = 0x80;
- trailer[1] = 1 << dp->index;
+ trailer[1] = dsa_xmit_port_mask(skb, dev);
trailer[2] = 0x10;
trailer[3] = 0x00;
diff --git a/net/dsa/tag_vsc73xx_8021q.c b/net/dsa/tag_vsc73xx_8021q.c
new file mode 100644
index 000000000000..af121a9aff7f
--- /dev/null
+++ b/net/dsa/tag_vsc73xx_8021q.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (C) 2024 Pawel Dembicki <paweldembicki@gmail.com>
+ */
+#include <linux/dsa/8021q.h>
+
+#include "tag.h"
+#include "tag_8021q.h"
+
+#define VSC73XX_8021Q_NAME "vsc73xx-8021q"
+
+static struct sk_buff *
+vsc73xx_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_user_to_port(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
+ u8 pcp;
+
+ if (skb->offload_fwd_mark) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br_vlan_enabled(br))
+ return skb;
+
+ tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);
+ }
+
+ pcp = netdev_txq_to_tc(netdev, queue_mapping);
+
+ return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
+ ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+}
+
+static struct sk_buff *
+vsc73xx_rcv(struct sk_buff *skb, struct net_device *netdev)
+{
+ int src_port = -1, switch_id = -1, vbid = -1, vid = -1;
+
+ dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, src_port, switch_id,
+ vid, vbid);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Couldn't decode source port\n");
+ return NULL;
+ }
+
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops vsc73xx_8021q_netdev_ops = {
+ .name = VSC73XX_8021Q_NAME,
+ .proto = DSA_TAG_PROTO_VSC73XX_8021Q,
+ .xmit = vsc73xx_xmit,
+ .rcv = vsc73xx_rcv,
+ .needed_headroom = VLAN_HLEN,
+ .promisc_on_conduit = true,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DSA tag driver for VSC73XX family of switches, using VLAN");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_VSC73XX_8021Q, VSC73XX_8021Q_NAME);
+
+module_dsa_tag_driver(vsc73xx_8021q_netdev_ops);
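
As a worked example of the TCI that vsc73xx_xmit() hands to dsa_8021q_xmit() (the VID value below is made up for illustration): the 16-bit 802.1Q TCI carries the priority code point in bits 15:13 and the VLAN ID in bits 11:0, so the two fields simply OR together.

u16 pcp = 5;                                  /* from netdev_txq_to_tc()  */
u16 tx_vid = 0xc00;                           /* example tag_8021q VID    */
u16 tci = (pcp << VLAN_PRIO_SHIFT) | tx_vid;  /* 0xa000 | 0x0c00 = 0xac00 */
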
diff --git a/net/dsa/tag_xrs700x.c b/net/dsa/tag_xrs700x.c
index 68d4633ddd5e..a05219f702c6 100644
--- a/net/dsa/tag_xrs700x.c
+++ b/net/dsa/tag_xrs700x.c
@@ -13,16 +13,10 @@
static struct sk_buff *xrs700x_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *partner, *dp = dsa_user_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 1);
- trailer[0] = BIT(dp->index);
-
- if (dp->hsr_dev)
- dsa_hsr_foreach_port(partner, dp->ds, dp->hsr_dev)
- if (partner != dp)
- trailer[0] |= BIT(partner->index);
+ trailer[0] = dsa_xmit_port_mask(skb, dev);
return skb;
}
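
Several taggers in this series (ocelot, seville, qca, rtl4_a, rtl8_4, a5psw, trailer, xrs700x) replace an open-coded BIT(dp->index) with dsa_xmit_port_mask(). Judging from the xrs700x code removed just above, such a helper boils down to the egress port bit, widened to the HSR partner ports when the port is bonded. A rough sketch under that assumption follows; the function name is made up and the real helper in net/dsa may do more (skb is kept only to match the call sites and is unused here).

static inline u16 example_xmit_port_mask(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct dsa_port *partner, *dp = dsa_user_to_port(dev);
	u16 mask = BIT(dp->index);

	/* For HSR/PRP, transmit towards every ring partner port too */
	if (dp->hsr_dev)
		dsa_hsr_foreach_port(partner, dp->ds, dp->hsr_dev)
			if (partner != dp)
				mask |= BIT(partner->index);

	return mask;
}
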
diff --git a/net/dsa/tag_yt921x.c b/net/dsa/tag_yt921x.c
new file mode 100644
index 000000000000..6bbfd42dc5df
--- /dev/null
+++ b/net/dsa/tag_yt921x.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Motorcomm YT921x Switch Extended CPU Port Tagging
+ *
+ * Copyright (c) 2025 David Yang <mmyangfl@gmail.com>
+ *
+ * +----+----+-------+-----+----+---------
+ * | DA | SA | TagET | Tag | ET | Payload ...
+ * +----+----+-------+-----+----+---------
+ * 6 6 2 6 2 N
+ *
+ * Tag Ethertype: CPU_TAG_TPID_TPID (default: ETH_P_YT921X = 0x9988)
+ * *  Hardcoded for the moment, although the hardware keeps it configurable.
+ *    Raise this if the value conflicts with another protocol or needs to
+ *    be changed for some reason.
+ * Tag:
+ * 2: VLAN Tag
+ * 2: Rx Port
+ * 15b: Rx Port Valid
+ * 14b-11b: Rx Port
+ * 10b-0b: Cmd?
+ * 2: Tx Port(s)
+ * 15b: Tx Port(s) Valid
+ * 10b-0b: Tx Port(s) Mask
+ */
+
+#include <linux/etherdevice.h>
+
+#include "tag.h"
+
+#define YT921X_TAG_NAME "yt921x"
+
+#define YT921X_TAG_LEN 8
+
+#define YT921X_TAG_PORT_EN BIT(15)
+#define YT921X_TAG_RX_PORT_M GENMASK(14, 11)
+#define YT921X_TAG_RX_CMD_M GENMASK(10, 0)
+#define YT921X_TAG_RX_CMD(x) FIELD_PREP(YT921X_TAG_RX_CMD_M, (x))
+#define YT921X_TAG_RX_CMD_FORWARDED 0x80
+#define YT921X_TAG_RX_CMD_UNK_UCAST 0xb2
+#define YT921X_TAG_RX_CMD_UNK_MCAST 0xb4
+#define YT921X_TAG_TX_PORTS GENMASK(10, 0)
+
+static struct sk_buff *
+yt921x_tag_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ __be16 *tag;
+ u16 tx;
+
+ skb_push(skb, YT921X_TAG_LEN);
+ dsa_alloc_etype_header(skb, YT921X_TAG_LEN);
+
+ tag = dsa_etype_header_pos_tx(skb);
+
+ tag[0] = htons(ETH_P_YT921X);
+ /* The VLAN tag field is not used on TX */

+ tag[1] = 0;
+ tag[2] = 0;
+ tx = FIELD_PREP(YT921X_TAG_TX_PORTS, dsa_xmit_port_mask(skb, netdev)) |
+ YT921X_TAG_PORT_EN;
+ tag[3] = htons(tx);
+
+ return skb;
+}
+
+static struct sk_buff *
+yt921x_tag_rcv(struct sk_buff *skb, struct net_device *netdev)
+{
+ unsigned int port;
+ __be16 *tag;
+ u16 cmd;
+ u16 rx;
+
+ if (unlikely(!pskb_may_pull(skb, YT921X_TAG_LEN)))
+ return NULL;
+
+ tag = dsa_etype_header_pos_rx(skb);
+
+ if (unlikely(tag[0] != htons(ETH_P_YT921X))) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Unexpected EtherType 0x%04x\n",
+ ntohs(tag[0]));
+ return NULL;
+ }
+
+ /* Locate which port this is coming from */
+ rx = ntohs(tag[2]);
+ if (unlikely((rx & YT921X_TAG_PORT_EN) == 0)) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Unexpected rx tag 0x%04x\n", rx);
+ return NULL;
+ }
+
+ port = FIELD_GET(YT921X_TAG_RX_PORT_M, rx);
+ skb->dev = dsa_conduit_find_user(netdev, 0, port);
+ if (unlikely(!skb->dev)) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Couldn't decode source port %u\n", port);
+ return NULL;
+ }
+
+ cmd = FIELD_GET(YT921X_TAG_RX_CMD_M, rx);
+ switch (cmd) {
+ case YT921X_TAG_RX_CMD_FORWARDED:
+ /* Already forwarded by hardware */
+ dsa_default_offload_fwd_mark(skb);
+ break;
+ case YT921X_TAG_RX_CMD_UNK_UCAST:
+ case YT921X_TAG_RX_CMD_UNK_MCAST:
+ /* NOTE: hardware doesn't distinguish between TRAP (copy to CPU
+ * only) and COPY (forward and copy to CPU). In order to perform
+ * a soft switch, NEVER use COPY action in the switch driver.
+ */
+ break;
+ default:
+ dev_warn_ratelimited(&netdev->dev,
+ "Unexpected rx cmd 0x%02x\n", cmd);
+ break;
+ }
+
+ /* Remove YT921x tag and update checksum */
+ skb_pull_rcsum(skb, YT921X_TAG_LEN);
+ dsa_strip_etype_header(skb, YT921X_TAG_LEN);
+
+ return skb;
+}
+
+static const struct dsa_device_ops yt921x_netdev_ops = {
+ .name = YT921X_TAG_NAME,
+ .proto = DSA_TAG_PROTO_YT921X,
+ .xmit = yt921x_tag_xmit,
+ .rcv = yt921x_tag_rcv,
+ .needed_headroom = YT921X_TAG_LEN,
+};
+
+MODULE_DESCRIPTION("DSA tag driver for Motorcomm YT921x switches");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_YT921X, YT921X_TAG_NAME);
+
+module_dsa_tag_driver(yt921x_netdev_ops);
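
For illustration, with the field definitions above, a frame injected towards switch port 3 (port number chosen arbitrarily) carries the 8-byte tag 99 88 00 00 00 00 80 08: the Tag EtherType, zeroed VLAN and Rx words, and a Tx word with the valid bit plus the destination port mask.

u16 tx = YT921X_TAG_PORT_EN |
	 FIELD_PREP(YT921X_TAG_TX_PORTS, BIT(3));  /* 0x8000 | 0x0008 = 0x8008 */
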
diff --git a/net/dsa/user.c b/net/dsa/user.c
index e8f56a40b614..f59d66f0975d 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -515,12 +515,13 @@ dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct dsa_user_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -577,20 +578,6 @@ dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dsa_user_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- int port = p->dp->index;
-
- /* Pass through to switch driver if it supports timestamping */
- switch (cmd) {
- case SIOCGHWTSTAMP:
- if (ds->ops->port_hwtstamp_get)
- return ds->ops->port_hwtstamp_get(ds, port, ifr);
- break;
- case SIOCSHWTSTAMP:
- if (ds->ops->port_hwtstamp_set)
- return ds->ops->port_hwtstamp_set(ds, port, ifr);
- break;
- }
return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
@@ -896,7 +883,7 @@ static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
{
struct dsa_switch *ds = p->dp->ds;
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NOBPF))
return;
if (!ds->ops->port_txtstamp)
@@ -1042,15 +1029,12 @@ static void dsa_user_get_strings(struct net_device *dev,
struct dsa_switch *ds = dp->ds;
if (stringset == ETH_SS_STATS) {
- int len = ETH_GSTRING_LEN;
-
- strscpy_pad(data, "tx_packets", len);
- strscpy_pad(data + len, "tx_bytes", len);
- strscpy_pad(data + 2 * len, "rx_packets", len);
- strscpy_pad(data + 3 * len, "rx_bytes", len);
+ ethtool_puts(&data, "tx_packets");
+ ethtool_puts(&data, "tx_bytes");
+ ethtool_puts(&data, "rx_packets");
+ ethtool_puts(&data, "rx_bytes");
if (ds->ops->get_strings)
- ds->ops->get_strings(ds, dp->index, stringset,
- data + 4 * len);
+ ds->ops->get_strings(ds, dp->index, stringset, data);
} else if (stringset == ETH_SS_TEST) {
net_selftest_get_strings(data);
}
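
The switch from strscpy_pad() at hand-computed offsets to ethtool_puts() works because the latter advances the string cursor itself, which is why ds->ops->get_strings() now receives plain data rather than data + 4 * len. Roughly, the idea is the following sketch (not the actual ethtool implementation):

static void example_ethtool_puts(u8 **data, const char *str)
{
	strscpy((char *)*data, str, ETH_GSTRING_LEN);
	*data += ETH_GSTRING_LEN;   /* leave *data at the next string slot */
}
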
@@ -1152,6 +1136,16 @@ dsa_user_get_rmon_stats(struct net_device *dev,
ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}
+static void dsa_user_get_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_ts_stats)
+ ds->ops->get_ts_stats(ds, dp->index, ts_stats);
+}
+
static void dsa_user_net_selftest(struct net_device *ndev,
struct ethtool_test *etest, u64 *buf)
{
@@ -1231,16 +1225,29 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
struct dsa_switch *ds = dp->ds;
int ret;
- /* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev || !dp->pl)
- return -ENODEV;
-
- if (!ds->ops->set_mac_eee)
+ /* Check whether the switch supports EEE */
+ if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
return -EOPNOTSUPP;
- ret = ds->ops->set_mac_eee(ds, dp->index, e);
- if (ret)
- return ret;
+ /* If the port is using phylink managed EEE, then an unimplemented
+ * set_mac_eee() is permissible.
+ */
+ if (!phylink_mac_implements_lpi(ds->phylink_mac_ops)) {
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!ds->ops->set_mac_eee)
+ return -EOPNOTSUPP;
+
+ ret = ds->ops->set_mac_eee(ds, dp->index, e);
+ if (ret)
+ return ret;
+ } else if (ds->ops->set_mac_eee) {
+ ret = ds->ops->set_mac_eee(ds, dp->index, e);
+ if (ret)
+ return ret;
+ }
return phylink_ethtool_set_eee(dp->pl, e);
}
@@ -1249,18 +1256,14 @@ static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
- int ret;
-
- /* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev || !dp->pl)
- return -ENODEV;
- if (!ds->ops->get_mac_eee)
+ /* Check whether the switch supports EEE */
+ if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
return -EOPNOTSUPP;
- ret = ds->ops->get_mac_eee(ds, dp->index, e);
- if (ret)
- return ret;
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev)
+ return -ENODEV;
return phylink_ethtool_get_eee(dp->pl, e);
}
@@ -1308,8 +1311,7 @@ static int dsa_user_set_pauseparam(struct net_device *dev,
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int dsa_user_netpoll_setup(struct net_device *dev,
- struct netpoll_info *ni)
+static int dsa_user_netpoll_setup(struct net_device *dev)
{
struct net_device *conduit = dsa_user_to_conduit(dev);
struct dsa_user_priv *p = netdev_priv(dev);
@@ -1365,7 +1367,7 @@ dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
static int
dsa_user_add_cls_matchall_mirred(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
- bool ingress)
+ bool ingress, bool ingress_target)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct dsa_port *dp = dsa_user_to_port(dev);
@@ -1377,11 +1379,19 @@ dsa_user_add_cls_matchall_mirred(struct net_device *dev,
struct dsa_port *to_dp;
int err;
- if (!ds->ops->port_mirror_add)
+ if (cls->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload \"protocol all\" matchall filter");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ds->ops->port_mirror_add) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch does not support mirroring operation");
return -EOPNOTSUPP;
+ }
- if (!flow_action_basic_hw_stats_check(&cls->rule->action,
- cls->common.extack))
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
return -EOPNOTSUPP;
act = &cls->rule->action.entries[0];
@@ -1389,8 +1399,36 @@ dsa_user_add_cls_matchall_mirred(struct net_device *dev,
if (!act->dev)
return -EINVAL;
- if (!dsa_user_dev_check(act->dev))
+ if (dsa_user_dev_check(act->dev)) {
+ if (ingress_target) {
+ /* We can only fulfill this using software assist */
+ if (cls->common.skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only mirred to ingress of DSA user port if filter also runs in software");
+ return -EOPNOTSUPP;
+ }
+ to_dp = dp->cpu_dp;
+ } else {
+ to_dp = dsa_user_to_port(act->dev);
+ }
+ } else {
+ /* Handle mirroring to foreign target ports as a mirror towards
+ * the CPU. The software tc rule will take the packets from
+ * there.
+ */
+ if (cls->common.skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only mirred to CPU if filter also runs in software");
+ return -EOPNOTSUPP;
+ }
+ to_dp = dp->cpu_dp;
+ }
+
+ if (dp->ds != to_dp->ds) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cross-chip mirroring not implemented");
return -EOPNOTSUPP;
+ }
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
if (!mall_tc_entry)
@@ -1399,9 +1437,6 @@ dsa_user_add_cls_matchall_mirred(struct net_device *dev,
mall_tc_entry->cookie = cls->cookie;
mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
-
- to_dp = dsa_user_to_port(act->dev);
-
mirror->to_local_port = to_dp->index;
mirror->ingress = ingress;
@@ -1442,8 +1477,7 @@ dsa_user_add_cls_matchall_police(struct net_device *dev,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(&cls->rule->action,
- cls->common.extack))
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
return -EOPNOTSUPP;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
@@ -1481,17 +1515,30 @@ static int dsa_user_add_cls_matchall(struct net_device *dev,
struct tc_cls_matchall_offload *cls,
bool ingress)
{
- int err = -EOPNOTSUPP;
+ const struct flow_action *action = &cls->rule->action;
+ struct netlink_ext_ack *extack = cls->common.extack;
- if (cls->common.protocol == htons(ETH_P_ALL) &&
- flow_offload_has_one_action(&cls->rule->action) &&
- cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
- err = dsa_user_add_cls_matchall_mirred(dev, cls, ingress);
- else if (flow_offload_has_one_action(&cls->rule->action) &&
- cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
- err = dsa_user_add_cls_matchall_police(dev, cls, ingress);
+ if (!flow_offload_has_one_action(action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload matchall filter with more than one action");
+ return -EOPNOTSUPP;
+ }
- return err;
+ switch (action->entries[0].id) {
+ case FLOW_ACTION_MIRRED:
+ return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
+ false);
+ case FLOW_ACTION_MIRRED_INGRESS:
+ return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
+ true);
+ case FLOW_ACTION_POLICE:
+ return dsa_user_add_cls_matchall_police(dev, cls, ingress);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown action");
+ break;
+ }
+
+ return -EOPNOTSUPP;
}
static void dsa_user_del_cls_matchall(struct net_device *dev,
@@ -1729,7 +1776,7 @@ static int dsa_user_set_rxnfc(struct net_device *dev,
}
static int dsa_user_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts)
+ struct kernel_ethtool_ts_info *ts)
{
struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
@@ -2459,6 +2506,7 @@ static const struct ethtool_ops dsa_user_ethtool_ops = {
.get_eth_mac_stats = dsa_user_get_eth_mac_stats,
.get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
.get_rmon_stats = dsa_user_get_rmon_stats,
+ .get_ts_stats = dsa_user_get_ts_stats,
.set_wol = dsa_user_set_wol,
.get_wol = dsa_user_get_wol,
.set_eee = dsa_user_set_eee,
@@ -2512,6 +2560,31 @@ static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
return 0;
}
+static int dsa_user_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_hwtstamp_get)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_hwtstamp_get(ds, dp->index, cfg);
+}
+
+static int dsa_user_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_hwtstamp_set)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_hwtstamp_set(ds, dp->index, cfg, extack);
+}
+
static const struct net_device_ops dsa_user_netdev_ops = {
.ndo_open = dsa_user_open,
.ndo_stop = dsa_user_close,
@@ -2533,6 +2606,8 @@ static const struct net_device_ops dsa_user_netdev_ops = {
.ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid,
.ndo_change_mtu = dsa_user_change_mtu,
.ndo_fill_forward_path = dsa_user_fill_forward_path,
+ .ndo_hwtstamp_get = dsa_user_hwtstamp_get,
+ .ndo_hwtstamp_set = dsa_user_hwtstamp_set,
};
static const struct device_type dsa_type = {
@@ -2642,11 +2717,12 @@ void dsa_user_setup_tagger(struct net_device *user)
user->features = conduit->vlan_features | NETIF_F_HW_TC;
user->hw_features |= NETIF_F_HW_TC;
- user->features |= NETIF_F_LLTX;
if (user->needed_tailroom)
user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
if (ds->needs_standalone_vlan_filtering)
user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ user->lltx = true;
}
int dsa_user_suspend(struct net_device *user_dev)
@@ -3528,7 +3604,7 @@ static int dsa_user_netdevice_event(struct notifier_block *nb,
list_add(&dp->user->close_list, &close_list);
}
- dev_close_many(&close_list, true);
+ netif_close_many(&close_list, true);
return NOTIFY_OK;
}