Diffstat (limited to 'net/dsa')
-rw-r--r--  net/dsa/Kconfig | 38
-rw-r--r--  net/dsa/Makefile | 19
-rw-r--r--  net/dsa/conduit.c (renamed from net/dsa/master.c) | 318
-rw-r--r--  net/dsa/conduit.h | 22
-rw-r--r--  net/dsa/devlink.c | 29
-rw-r--r--  net/dsa/dsa.c | 406
-rw-r--r--  net/dsa/dsa.h | 12
-rw-r--r--  net/dsa/master.h | 19
-rw-r--r--  net/dsa/netlink.c | 22
-rw-r--r--  net/dsa/port.c | 476
-rw-r--r--  net/dsa/port.h | 9
-rw-r--r--  net/dsa/slave.h | 69
-rw-r--r--  net/dsa/stubs.c | 10
-rw-r--r--  net/dsa/switch.c | 109
-rw-r--r--  net/dsa/switch.h | 11
-rw-r--r--  net/dsa/tag.c | 17
-rw-r--r--  net/dsa/tag.h | 185
-rw-r--r--  net/dsa/tag_8021q.c | 110
-rw-r--r--  net/dsa/tag_8021q.h | 7
-rw-r--r--  net/dsa/tag_ar9331.c | 5
-rw-r--r--  net/dsa/tag_brcm.c | 148
-rw-r--r--  net/dsa/tag_dsa.c | 7
-rw-r--r--  net/dsa/tag_gswip.c | 9
-rw-r--r--  net/dsa/tag_hellcreek.c | 6
-rw-r--r--  net/dsa/tag_ksz.c | 277
-rw-r--r--  net/dsa/tag_lan9303.c | 5
-rw-r--r--  net/dsa/tag_mtk.c | 8
-rw-r--r--  net/dsa/tag_mxl-gsw1xx.c | 117
-rw-r--r--  net/dsa/tag_none.c | 7
-rw-r--r--  net/dsa/tag_ocelot.c | 62
-rw-r--r--  net/dsa/tag_ocelot_8021q.c | 19
-rw-r--r--  net/dsa/tag_qca.c | 16
-rw-r--r--  net/dsa/tag_rtl4_a.c | 14
-rw-r--r--  net/dsa/tag_rtl8_4.c | 8
-rw-r--r--  net/dsa/tag_rzn1_a5psw.c | 6
-rw-r--r--  net/dsa/tag_sja1105.c | 151
-rw-r--r--  net/dsa/tag_trailer.c | 6
-rw-r--r--  net/dsa/tag_vsc73xx_8021q.c | 68
-rw-r--r--  net/dsa/tag_xrs700x.c | 11
-rw-r--r--  net/dsa/tag_yt921x.c | 139
-rw-r--r--  net/dsa/trace.c | 39
-rw-r--r--  net/dsa/trace.h | 447
-rw-r--r--  net/dsa/user.c (renamed from net/dsa/slave.c) | 2014
-rw-r--r--  net/dsa/user.h | 71
44 files changed, 3619 insertions, 1929 deletions
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 8e698bea99a3..f86b30742122 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -42,12 +42,24 @@ config NET_DSA_TAG_BRCM
Broadcom switches which place the tag after the MAC source address.
config NET_DSA_TAG_BRCM_LEGACY
- tristate "Tag driver for Broadcom legacy switches using in-frame headers"
+ tristate "Tag driver for BCM63xx legacy switches using in-frame headers"
select NET_DSA_TAG_BRCM_COMMON
help
Say Y if you want to enable support for tagging frames for the
- Broadcom legacy switches which place the tag after the MAC source
+ BCM63xx legacy switches which place the tag after the MAC source
address.
+ This tag is used in BCM63xx legacy switches which work without the
+ original FCS and length before the tag insertion.
+
+config NET_DSA_TAG_BRCM_LEGACY_FCS
+ tristate "Tag driver for BCM53xx legacy switches using in-frame headers"
+ select NET_DSA_TAG_BRCM_COMMON
+ help
+ Say Y if you want to enable support for tagging frames for the
+ BCM53xx legacy switches which place the tag after the MAC source
+ address.
+ This tag is used in BCM53xx legacy switches which expect original
+ FCS and length before the tag insertion to be present.
config NET_DSA_TAG_BRCM_PREPEND
tristate "Tag driver for Broadcom switches using prepended headers"
@@ -92,6 +104,14 @@ config NET_DSA_TAG_MTK
Say Y or M if you want to enable support for tagging frames for
Mediatek switches.
+config NET_DSA_TAG_MXL_GSW1XX
+ tristate "Tag driver for MaxLinear GSW1xx switches"
+ help
+ The GSW1xx family of switches supports an 8-byte special tag which
+ can be used on the CPU port of the switch.
+ Say Y or M if you want to enable support for tagging frames for
+ MaxLinear GSW1xx switches.
+
config NET_DSA_TAG_KSZ
tristate "Tag driver for Microchip 8795/937x/9477/9893 families of switches"
help
@@ -129,7 +149,7 @@ config NET_DSA_TAG_RTL4_A
tristate "Tag driver for Realtek 4 byte protocol A tags"
help
Say Y or M if you want to enable support for tagging frames for the
- Realtek switches with 4 byte protocol A tags, sich as found in
+ Realtek switches with 4 byte protocol A tags, such as found in
the Realtek RTL8366RB.
config NET_DSA_TAG_RTL8_4
@@ -166,10 +186,22 @@ config NET_DSA_TAG_TRAILER
Say Y or M if you want to enable support for tagging frames
with a trailer, e.g. Marvell 88E6060.
+config NET_DSA_TAG_VSC73XX_8021Q
+ tristate "Tag driver for Microchip/Vitesse VSC73xx family of switches, using VLAN"
+ help
+ Say Y or M if you want to enable support for tagging frames with a
+ custom VLAN-based header.
+
config NET_DSA_TAG_XRS700X
tristate "Tag driver for XRS700x switches"
help
Say Y or M if you want to enable support for tagging frames for
Arrow SpeedChips XRS700x switches that use a single byte tag trailer.
+config NET_DSA_TAG_YT921X
+ tristate "Tag driver for Motorcomm YT921x switches"
+ help
+ Say Y or M if you want to enable support for tagging frames for
+ Motorcomm YT921x switches.
+
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index cc7e93a562fe..42d173f5a701 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,16 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
+
+# the stubs are built-in whenever DSA is built-in or module
+ifdef CONFIG_NET_DSA
+obj-y := stubs.o
+endif
+
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
dsa_core-y += \
+ conduit.o \
devlink.o \
dsa.o \
- master.o \
netlink.o \
port.o \
- slave.o \
switch.o \
tag.o \
- tag_8021q.o
+ tag_8021q.o \
+ trace.o \
+ user.o
# tagging formats
obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o
@@ -21,6 +28,7 @@ obj-$(CONFIG_NET_DSA_TAG_HELLCREEK) += tag_hellcreek.o
obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
+obj-$(CONFIG_NET_DSA_TAG_MXL_GSW1XX) += tag_mxl-gsw1xx.o
obj-$(CONFIG_NET_DSA_TAG_NONE) += tag_none.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
obj-$(CONFIG_NET_DSA_TAG_OCELOT_8021Q) += tag_ocelot_8021q.o
@@ -30,4 +38,9 @@ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o
obj-$(CONFIG_NET_DSA_TAG_RZN1_A5PSW) += tag_rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o
obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o
obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o
+obj-$(CONFIG_NET_DSA_TAG_YT921X) += tag_yt921x.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/net/dsa/master.c b/net/dsa/conduit.c
index 26d90140d271..a1b044467bd6 100644
--- a/net/dsa/master.c
+++ b/net/dsa/conduit.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Handling of a master device, switching frames via its switch fabric CPU port
+ * Handling of a conduit device, switching frames via its switch fabric CPU port
*
* Copyright (c) 2017 Savoir-faire Linux Inc.
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -10,13 +10,14 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/dsa.h>
+#include <net/netdev_lock.h>
+#include "conduit.h"
#include "dsa.h"
-#include "master.h"
#include "port.h"
#include "tag.h"
-static int dsa_master_get_regs_len(struct net_device *dev)
+static int dsa_conduit_get_regs_len(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -25,8 +26,10 @@ static int dsa_master_get_regs_len(struct net_device *dev)
int ret = 0;
int len;
- if (ops->get_regs_len) {
+ if (ops && ops->get_regs_len) {
+ netdev_lock_ops(dev);
len = ops->get_regs_len(dev);
+ netdev_unlock_ops(dev);
if (len < 0)
return len;
ret += len;
@@ -45,8 +48,8 @@ static int dsa_master_get_regs_len(struct net_device *dev)
return ret;
}
-static void dsa_master_get_regs(struct net_device *dev,
- struct ethtool_regs *regs, void *data)
+static void dsa_conduit_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -56,12 +59,16 @@ static void dsa_master_get_regs(struct net_device *dev,
int port = cpu_dp->index;
int len;
- if (ops->get_regs_len && ops->get_regs) {
+ if (ops && ops->get_regs_len && ops->get_regs) {
+ netdev_lock_ops(dev);
len = ops->get_regs_len(dev);
- if (len < 0)
+ if (len < 0) {
+ netdev_unlock_ops(dev);
return;
+ }
regs->len = len;
ops->get_regs(dev, regs, data);
+ netdev_unlock_ops(dev);
data += regs->len;
}
@@ -80,28 +87,56 @@ static void dsa_master_get_regs(struct net_device *dev,
}
}
-static void dsa_master_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+static ssize_t dsa_conduit_append_port_stats(struct dsa_switch *ds, int port,
+ u64 *data, size_t start)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
+ int count;
+
+ if (!ds->ops->get_sset_count)
+ return 0;
+
+ count = ds->ops->get_sset_count(ds, port, ETH_SS_STATS);
+ if (count < 0)
+ return count;
+
+ if (ds->ops->get_ethtool_stats)
+ ds->ops->get_ethtool_stats(ds, port, data + start);
+
+ return count;
+}
+
+static void dsa_conduit_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
- int port = cpu_dp->index;
- int count = 0;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ int count, mcount = 0;
- if (ops->get_sset_count && ops->get_ethtool_stats) {
- count = ops->get_sset_count(dev, ETH_SS_STATS);
+ if (ops && ops->get_sset_count && ops->get_ethtool_stats) {
+ netdev_lock_ops(dev);
+ mcount = ops->get_sset_count(dev, ETH_SS_STATS);
ops->get_ethtool_stats(dev, stats, data);
+ netdev_unlock_ops(dev);
}
- if (ds->ops->get_ethtool_stats)
- ds->ops->get_ethtool_stats(ds, port, data + count);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ count = dsa_conduit_append_port_stats(dp->ds, dp->index,
+ data, mcount);
+ if (count < 0)
+ return;
+
+ mcount += count;
+ }
}
-static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -109,13 +144,15 @@ static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
int port = cpu_dp->index;
int count = 0;
- if (dev->phydev && !ops->get_ethtool_phy_stats) {
+ if (dev->phydev && (!ops || !ops->get_ethtool_phy_stats)) {
count = phy_ethtool_get_sset_count(dev->phydev);
if (count >= 0)
phy_ethtool_get_stats(dev->phydev, stats, data);
- } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
+ } else if (ops && ops->get_sset_count && ops->get_ethtool_phy_stats) {
+ netdev_lock_ops(dev);
count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
ops->get_ethtool_phy_stats(dev, stats, data);
+ netdev_unlock_ops(dev);
}
if (count < 0)
@@ -125,44 +162,84 @@ static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}
-static int dsa_master_get_sset_count(struct net_device *dev, int sset)
+static void dsa_conduit_append_port_sset_count(struct dsa_switch *ds, int port,
+ int sset, int *count)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
+ if (ds->ops->get_sset_count)
+ *count += ds->ops->get_sset_count(ds, port, sset);
+}
+
+static int dsa_conduit_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
int count = 0;
+ netdev_lock_ops(dev);
if (sset == ETH_SS_PHY_STATS && dev->phydev &&
- !ops->get_ethtool_phy_stats)
+ (!ops || !ops->get_ethtool_phy_stats))
count = phy_ethtool_get_sset_count(dev->phydev);
- else if (ops->get_sset_count)
+ else if (ops && ops->get_sset_count)
count = ops->get_sset_count(dev, sset);
+ netdev_unlock_ops(dev);
if (count < 0)
count = 0;
- if (ds->ops->get_sset_count)
- count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ dsa_conduit_append_port_sset_count(dp->ds, dp->index, sset,
+ &count);
+ }
return count;
}
-static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
- uint8_t *data)
+static ssize_t dsa_conduit_append_port_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data,
+ size_t start)
{
- struct dsa_port *cpu_dp = dev->dsa_ptr;
- const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
- struct dsa_switch *ds = cpu_dp->ds;
- int port = cpu_dp->index;
int len = ETH_GSTRING_LEN;
- int mcount = 0, count, i;
- uint8_t pfx[4];
- uint8_t *ndata;
+ u8 pfx[8], *ndata;
+ int count, i;
- snprintf(pfx, sizeof(pfx), "p%.2d", port);
+ if (!ds->ops->get_strings)
+ return 0;
+
+ snprintf(pfx, sizeof(pfx), "s%.2d_p%.2d", ds->index, port);
/* We do not want to be NULL-terminated, since this is a prefix */
pfx[sizeof(pfx) - 1] = '_';
+ ndata = data + start * len;
+ /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
+ * the output after to prepend our CPU port prefix we
+ * constructed earlier
+ */
+ ds->ops->get_strings(ds, port, stringset, ndata);
+ count = ds->ops->get_sset_count(ds, port, stringset);
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ memmove(ndata + (i * len + sizeof(pfx)),
+ ndata + i * len, len - sizeof(pfx));
+ memcpy(ndata + i * len, pfx, sizeof(pfx));
+ }
+ return count;
+}
+
+static void dsa_conduit_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ struct dsa_port *dp, *cpu_dp = dev->dsa_ptr;
+ const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ int count, mcount = 0;
+
+ netdev_lock_ops(dev);
if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
!ops->get_ethtool_phy_stats) {
mcount = phy_ethtool_get_sset_count(dev->phydev);
@@ -176,58 +253,48 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
mcount = 0;
ops->get_strings(dev, stringset, data);
}
+ netdev_unlock_ops(dev);
- if (ds->ops->get_strings) {
- ndata = data + mcount * len;
- /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
- * the output after to prepend our CPU port prefix we
- * constructed earlier
- */
- ds->ops->get_strings(ds, port, stringset, ndata);
- count = ds->ops->get_sset_count(ds, port, stringset);
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (!dsa_port_is_dsa(dp) && !dsa_port_is_cpu(dp))
+ continue;
+
+ count = dsa_conduit_append_port_strings(dp->ds, dp->index,
+ stringset, data,
+ mcount);
if (count < 0)
return;
- for (i = 0; i < count; i++) {
- memmove(ndata + (i * len + sizeof(pfx)),
- ndata + i * len, len - sizeof(pfx));
- memcpy(ndata + i * len, pfx, sizeof(pfx));
- }
+
+ mcount += count;
}
}
-static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+/* Deny PTP operations on conduit if there is at least one switch in the tree
+ * that is PTP capable.
+ */
+int __dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch *ds = cpu_dp->ds;
struct dsa_switch_tree *dst;
- int err = -EOPNOTSUPP;
struct dsa_port *dp;
dst = ds->dst;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- case SIOCSHWTSTAMP:
- /* Deny PTP operations on master if there is at least one
- * switch in the tree that is PTP capable.
- */
- list_for_each_entry(dp, &dst->ports, list)
- if (dsa_port_supports_hwtstamp(dp, ifr))
- return -EBUSY;
- break;
+ list_for_each_entry(dp, &dst->ports, list) {
+ if (dsa_port_supports_hwtstamp(dp)) {
+ NL_SET_ERR_MSG(extack,
+ "HW timestamping not allowed on DSA conduit when switch supports the operation");
+ return -EBUSY;
+ }
}
- if (dev->netdev_ops->ndo_eth_ioctl)
- err = dev->netdev_ops->ndo_eth_ioctl(dev, ifr, cmd);
-
- return err;
+ return 0;
}
-static const struct dsa_netdevice_ops dsa_netdev_ops = {
- .ndo_eth_ioctl = dsa_master_ioctl,
-};
-
-static int dsa_master_ethtool_setup(struct net_device *dev)
+static int dsa_conduit_ethtool_setup(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch *ds = cpu_dp->ds;
@@ -244,19 +311,19 @@ static int dsa_master_ethtool_setup(struct net_device *dev)
if (cpu_dp->orig_ethtool_ops)
memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));
- ops->get_regs_len = dsa_master_get_regs_len;
- ops->get_regs = dsa_master_get_regs;
- ops->get_sset_count = dsa_master_get_sset_count;
- ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
- ops->get_strings = dsa_master_get_strings;
- ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;
+ ops->get_regs_len = dsa_conduit_get_regs_len;
+ ops->get_regs = dsa_conduit_get_regs;
+ ops->get_sset_count = dsa_conduit_get_sset_count;
+ ops->get_ethtool_stats = dsa_conduit_get_ethtool_stats;
+ ops->get_strings = dsa_conduit_get_strings;
+ ops->get_ethtool_phy_stats = dsa_conduit_get_ethtool_phy_stats;
dev->ethtool_ops = ops;
return 0;
}
-static void dsa_master_ethtool_teardown(struct net_device *dev)
+static void dsa_conduit_ethtool_teardown(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -267,25 +334,16 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
cpu_dp->orig_ethtool_ops = NULL;
}
-static void dsa_netdev_ops_set(struct net_device *dev,
- const struct dsa_netdevice_ops *ops)
-{
- if (netif_is_lag_master(dev))
- return;
-
- dev->dsa_ptr->netdev_ops = ops;
-}
-
-/* Keep the master always promiscuous if the tagging protocol requires that
+/* Keep the conduit always promiscuous if the tagging protocol requires that
* (garbles MAC DA) or if it doesn't support unicast filtering, case in which
* it would revert to promiscuous mode as soon as we call dev_uc_add() on it
* anyway.
*/
-static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
+static void dsa_conduit_set_promiscuity(struct net_device *dev, int inc)
{
const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
- if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
+ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_conduit)
return;
ASSERT_RTNL();
@@ -299,7 +357,7 @@ static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
struct net_device *dev = to_net_dev(d);
struct dsa_port *cpu_dp = dev->dsa_ptr;
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
dsa_tag_protocol_to_str(cpu_dp->tag_ops));
}
@@ -352,17 +410,17 @@ out:
}
static DEVICE_ATTR_RW(tagging);
-static struct attribute *dsa_slave_attrs[] = {
+static struct attribute *dsa_user_attrs[] = {
&dev_attr_tagging.attr,
NULL
};
static const struct attribute_group dsa_group = {
.name = "dsa",
- .attrs = dsa_slave_attrs,
+ .attrs = dsa_user_attrs,
};
-static void dsa_master_reset_mtu(struct net_device *dev)
+static void dsa_conduit_reset_mtu(struct net_device *dev)
{
int err;
@@ -372,7 +430,7 @@ static void dsa_master_reset_mtu(struct net_device *dev)
"Unable to reset MTU to exclude DSA overheads\n");
}
-int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+int dsa_conduit_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
struct dsa_switch *ds = cpu_dp->ds;
@@ -381,7 +439,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
- /* The DSA master must use SET_NETDEV_DEV for this to work. */
+ /* The DSA conduit must use SET_NETDEV_DEV for this to work. */
if (!netif_is_lag_master(dev)) {
consumer_link = device_link_add(ds->dev, dev->dev.parent,
DL_FLAG_AUTOREMOVE_CONSUMER);
@@ -392,7 +450,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
}
/* The switch driver may not implement ->port_change_mtu(), case in
- * which dsa_slave_change_mtu() will not update the master MTU either,
+ * which dsa_user_change_mtu() will not update the conduit MTU either,
* so we need to do that here.
*/
ret = dev_set_mtu(dev, mtu);
@@ -408,35 +466,31 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
dev->dsa_ptr = cpu_dp;
- dsa_master_set_promiscuity(dev, 1);
+ dsa_conduit_set_promiscuity(dev, 1);
- ret = dsa_master_ethtool_setup(dev);
+ ret = dsa_conduit_ethtool_setup(dev);
if (ret)
goto out_err_reset_promisc;
- dsa_netdev_ops_set(dev, &dsa_netdev_ops);
-
ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
if (ret)
- goto out_err_ndo_teardown;
+ goto out_err_ethtool_teardown;
return ret;
-out_err_ndo_teardown:
- dsa_netdev_ops_set(dev, NULL);
- dsa_master_ethtool_teardown(dev);
+out_err_ethtool_teardown:
+ dsa_conduit_ethtool_teardown(dev);
out_err_reset_promisc:
- dsa_master_set_promiscuity(dev, -1);
+ dsa_conduit_set_promiscuity(dev, -1);
return ret;
}
-void dsa_master_teardown(struct net_device *dev)
+void dsa_conduit_teardown(struct net_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &dsa_group);
- dsa_netdev_ops_set(dev, NULL);
- dsa_master_ethtool_teardown(dev);
- dsa_master_reset_mtu(dev);
- dsa_master_set_promiscuity(dev, -1);
+ dsa_conduit_ethtool_teardown(dev);
+ dsa_conduit_reset_mtu(dev);
+ dsa_conduit_set_promiscuity(dev, -1);
dev->dsa_ptr = NULL;
@@ -447,42 +501,40 @@ void dsa_master_teardown(struct net_device *dev)
wmb();
}
-int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
- struct netdev_lag_upper_info *uinfo,
- struct netlink_ext_ack *extack)
+int dsa_conduit_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack)
{
- bool master_setup = false;
+ bool conduit_setup = false;
int err;
if (!netdev_uses_dsa(lag_dev)) {
- err = dsa_master_setup(lag_dev, cpu_dp);
+ err = dsa_conduit_setup(lag_dev, cpu_dp);
if (err)
return err;
- master_setup = true;
+ conduit_setup = true;
}
err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
if (err) {
- if (extack && !extack->_msg)
- NL_SET_ERR_MSG_MOD(extack,
- "CPU port failed to join LAG");
- goto out_master_teardown;
+ NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG");
+ goto out_conduit_teardown;
}
return 0;
-out_master_teardown:
- if (master_setup)
- dsa_master_teardown(lag_dev);
+out_conduit_teardown:
+ if (conduit_setup)
+ dsa_conduit_teardown(lag_dev);
return err;
}
-/* Tear down a master if there isn't any other user port on it,
+/* Tear down a conduit if there isn't any other user port on it,
* optionally also destroying LAG information.
*/
-void dsa_master_lag_teardown(struct net_device *lag_dev,
- struct dsa_port *cpu_dp)
+void dsa_conduit_lag_teardown(struct net_device *lag_dev,
+ struct dsa_port *cpu_dp)
{
struct net_device *upper;
struct list_head *iter;
@@ -490,8 +542,8 @@ void dsa_master_lag_teardown(struct net_device *lag_dev,
dsa_port_lag_leave(cpu_dp, lag_dev);
netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
- if (dsa_slave_dev_check(upper))
+ if (dsa_user_dev_check(upper))
return;
- dsa_master_teardown(lag_dev);
+ dsa_conduit_teardown(lag_dev);
}
diff --git a/net/dsa/conduit.h b/net/dsa/conduit.h
new file mode 100644
index 000000000000..31f8834f54bb
--- /dev/null
+++ b/net/dsa/conduit.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DSA_CONDUIT_H
+#define __DSA_CONDUIT_H
+
+struct dsa_port;
+struct net_device;
+struct netdev_lag_upper_info;
+struct netlink_ext_ack;
+
+int dsa_conduit_setup(struct net_device *dev, struct dsa_port *cpu_dp);
+void dsa_conduit_teardown(struct net_device *dev);
+int dsa_conduit_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack);
+void dsa_conduit_lag_teardown(struct net_device *lag_dev,
+ struct dsa_port *cpu_dp);
+int __dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/net/dsa/devlink.c b/net/dsa/devlink.c
index 431bf52290a1..ed342f345692 100644
--- a/net/dsa/devlink.c
+++ b/net/dsa/devlink.c
@@ -182,7 +182,8 @@ static const struct devlink_ops dsa_devlink_ops = {
};
int dsa_devlink_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
@@ -194,7 +195,8 @@ int dsa_devlink_param_get(struct devlink *dl, u32 id,
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
@@ -228,10 +230,15 @@ int dsa_devlink_resource_register(struct dsa_switch *ds,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params)
{
- return devlink_resource_register(ds->devlink, resource_name,
- resource_size, resource_id,
- parent_resource_id,
- size_params);
+ int ret;
+
+ devl_lock(ds->devlink);
+ ret = devl_resource_register(ds->devlink, resource_name, resource_size,
+ resource_id, parent_resource_id,
+ size_params);
+ devl_unlock(ds->devlink);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
@@ -246,15 +253,19 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv)
{
- return devlink_resource_occ_get_register(ds->devlink, resource_id,
- occ_get, occ_get_priv);
+ devl_lock(ds->devlink);
+ devl_resource_occ_get_register(ds->devlink, resource_id, occ_get,
+ occ_get_priv);
+ devl_unlock(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id)
{
- devlink_resource_occ_get_unregister(ds->devlink, resource_id);
+ devl_lock(ds->devlink);
+ devl_resource_occ_get_unregister(ds->devlink, resource_id);
+ devl_unlock(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e5f156940c67..a20efabe778f 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -9,24 +9,25 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/if_hsr.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
+#include "conduit.h"
#include "devlink.h"
#include "dsa.h"
-#include "master.h"
#include "netlink.h"
#include "port.h"
-#include "slave.h"
#include "switch.h"
#include "tag.h"
+#include "user.h"
#define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
@@ -364,18 +365,18 @@ static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
return NULL;
}
-struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
+struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst)
{
struct device_node *ethernet;
- struct net_device *master;
+ struct net_device *conduit;
struct dsa_port *cpu_dp;
cpu_dp = dsa_tree_find_first_cpu(dst);
ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
- master = of_find_net_device_by_node(ethernet);
+ conduit = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
- return master;
+ return conduit;
}
/* Assign the default CPU port (the first one in the tree) to all ports of the
@@ -402,6 +403,24 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
return 0;
}
+static struct dsa_port *
+dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp;
+
+ if (!ds->ops->preferred_default_local_cpu_port)
+ return NULL;
+
+ cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
+ if (!cpu_dp)
+ return NULL;
+
+ if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
+ return NULL;
+
+ return cpu_dp;
+}
+
/* Perform initial assignment of CPU ports to user ports and DSA links in the
* fabric, giving preference to CPU ports local to each switch. Default to
* using the first CPU port in the switch tree if the port does not have a CPU
@@ -409,12 +428,16 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
*/
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
- struct dsa_port *cpu_dp, *dp;
+ struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
list_for_each_entry(cpu_dp, &dst->ports, list) {
if (!dsa_port_is_cpu(cpu_dp))
continue;
+ preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
+ if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
+ continue;
+
/* Prefer a local CPU port */
dsa_switch_for_each_port(dp, cpu_dp->ds) {
/* Prefer the first local CPU port found */
@@ -494,7 +517,7 @@ static int dsa_port_setup(struct dsa_port *dp)
break;
case DSA_PORT_TYPE_USER:
of_get_mac_address(dp->dn, dp->mac);
- err = dsa_slave_create(dp);
+ err = dsa_user_create(dp);
break;
}
@@ -531,9 +554,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
dsa_shared_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
- if (dp->slave) {
- dsa_slave_destroy(dp->slave);
- dp->slave = NULL;
+ if (dp->user) {
+ dsa_user_destroy(dp->user);
+ dp->user = NULL;
}
break;
}
@@ -603,15 +626,14 @@ static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
static int dsa_switch_setup(struct dsa_switch *ds)
{
- struct device_node *dn;
int err;
if (ds->setup)
return 0;
- /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
+ /* Initialize ds->phys_mii_mask before registering the user MDIO bus
* driver and before ops->setup() has run, since the switch drivers and
- * the slave MDIO bus driver rely on these values for probing PHY
+ * the user MDIO bus driver rely on these values for probing PHY
* devices or not
*/
ds->phys_mii_mask |= dsa_user_ports(ds);
@@ -634,21 +656,18 @@ static int dsa_switch_setup(struct dsa_switch *ds)
if (err)
goto teardown;
- if (!ds->slave_mii_bus && ds->ops->phy_read) {
- ds->slave_mii_bus = mdiobus_alloc();
- if (!ds->slave_mii_bus) {
+ if (!ds->user_mii_bus && ds->ops->phy_read) {
+ ds->user_mii_bus = mdiobus_alloc();
+ if (!ds->user_mii_bus) {
err = -ENOMEM;
goto teardown;
}
- dsa_slave_mii_bus_init(ds);
+ dsa_user_mii_bus_init(ds);
- dn = of_get_child_by_name(ds->dev->of_node, "mdio");
-
- err = of_mdiobus_register(ds->slave_mii_bus, dn);
- of_node_put(dn);
+ err = mdiobus_register(ds->user_mii_bus);
if (err < 0)
- goto free_slave_mii_bus;
+ goto free_user_mii_bus;
}
dsa_switch_devlink_register(ds);
@@ -656,9 +675,9 @@ static int dsa_switch_setup(struct dsa_switch *ds)
ds->setup = true;
return 0;
-free_slave_mii_bus:
- if (ds->slave_mii_bus && ds->ops->phy_read)
- mdiobus_free(ds->slave_mii_bus);
+free_user_mii_bus:
+ if (ds->user_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->user_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
@@ -676,10 +695,10 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
dsa_switch_devlink_unregister(ds);
- if (ds->slave_mii_bus && ds->ops->phy_read) {
- mdiobus_unregister(ds->slave_mii_bus);
- mdiobus_free(ds->slave_mii_bus);
- ds->slave_mii_bus = NULL;
+ if (ds->user_mii_bus && ds->ops->phy_read) {
+ mdiobus_unregister(ds->user_mii_bus);
+ mdiobus_free(ds->user_mii_bus);
+ ds->user_mii_bus = NULL;
}
dsa_switch_teardown_tag_protocol(ds);
@@ -770,7 +789,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
return err;
}
-static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
+static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp;
int err = 0;
@@ -778,18 +797,18 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
rtnl_lock();
dsa_tree_for_each_cpu_port(cpu_dp, dst) {
- struct net_device *master = cpu_dp->master;
- bool admin_up = (master->flags & IFF_UP) &&
- !qdisc_tx_is_noop(master);
+ struct net_device *conduit = cpu_dp->conduit;
+ bool admin_up = (conduit->flags & IFF_UP) &&
+ !qdisc_tx_is_noop(conduit);
- err = dsa_master_setup(master, cpu_dp);
+ err = dsa_conduit_setup(conduit, cpu_dp);
if (err)
break;
- /* Replay master state event */
- dsa_tree_master_admin_state_change(dst, master, admin_up);
- dsa_tree_master_oper_state_change(dst, master,
- netif_oper_up(master));
+ /* Replay conduit state event */
+ dsa_tree_conduit_admin_state_change(dst, conduit, admin_up);
+ dsa_tree_conduit_oper_state_change(dst, conduit,
+ netif_oper_up(conduit));
}
rtnl_unlock();
@@ -797,22 +816,22 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
return err;
}
-static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
+static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp;
rtnl_lock();
dsa_tree_for_each_cpu_port(cpu_dp, dst) {
- struct net_device *master = cpu_dp->master;
+ struct net_device *conduit = cpu_dp->conduit;
/* Synthesizing an "admin down" state is sufficient for
- * the switches to get a notification if the master is
+ * the switches to get a notification if the conduit is
* currently up and running.
*/
- dsa_tree_master_admin_state_change(dst, master, false);
+ dsa_tree_conduit_admin_state_change(dst, conduit, false);
- dsa_master_teardown(master);
+ dsa_conduit_teardown(conduit);
}
rtnl_unlock();
@@ -844,6 +863,16 @@ static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
kfree(dst->lags);
}
+static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst)
+{
+ struct dsa_link *dl, *next;
+
+ list_for_each_entry_safe(dl, next, &dst->rtable, list) {
+ list_del(&dl->list);
+ kfree(dl);
+ }
+}
+
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
bool complete;
@@ -861,7 +890,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
err = dsa_tree_setup_cpu_ports(dst);
if (err)
- return err;
+ goto teardown_rtable;
err = dsa_tree_setup_switches(dst);
if (err)
@@ -871,13 +900,13 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
if (err)
goto teardown_switches;
- err = dsa_tree_setup_master(dst);
+ err = dsa_tree_setup_conduit(dst);
if (err)
goto teardown_ports;
err = dsa_tree_setup_lags(dst);
if (err)
- goto teardown_master;
+ goto teardown_conduit;
dst->setup = true;
@@ -885,28 +914,28 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
return 0;
-teardown_master:
- dsa_tree_teardown_master(dst);
+teardown_conduit:
+ dsa_tree_teardown_conduit(dst);
teardown_ports:
dsa_tree_teardown_ports(dst);
teardown_switches:
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
+teardown_rtable:
+ dsa_tree_teardown_routing_table(dst);
return err;
}
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
- struct dsa_link *dl, *next;
-
if (!dst->setup)
return;
dsa_tree_teardown_lags(dst);
- dsa_tree_teardown_master(dst);
+ dsa_tree_teardown_conduit(dst);
dsa_tree_teardown_ports(dst);
@@ -914,10 +943,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_cpu_ports(dst);
- list_for_each_entry_safe(dl, next, &dst->rtable, list) {
- list_del(&dl->list);
- kfree(dl);
- }
+ dsa_tree_teardown_routing_table(dst);
pr_info("DSA: tree %d torn down\n", dst->index);
@@ -955,7 +981,7 @@ out_disconnect:
return err;
}
-/* Since the dsa/tagging sysfs device attribute is per master, the assumption
+/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption
* is that all DSA switches within a tree share the same tagger, otherwise
* they would have formed disjoint trees (different "dsa,member" values).
*/
@@ -976,10 +1002,10 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
* restriction, there needs to be another mutex which serializes this.
*/
dsa_tree_for_each_user_port(dp, dst) {
- if (dsa_port_to_master(dp)->flags & IFF_UP)
+ if (dsa_port_to_conduit(dp)->flags & IFF_UP)
goto out_unlock;
- if (dp->slave->flags & IFF_UP)
+ if (dp->user->flags & IFF_UP)
goto out_unlock;
}
@@ -1005,62 +1031,62 @@ out_unlock:
return err;
}
-static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
- struct net_device *master)
+static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit)
{
- struct dsa_notifier_master_state_info info;
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_notifier_conduit_state_info info;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
- info.master = master;
- info.operational = dsa_port_master_is_operational(cpu_dp);
+ info.conduit = conduit;
+ info.operational = dsa_port_conduit_is_operational(cpu_dp);
- dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
+ dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info);
}
-void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
- bool up)
+void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
+ bool up)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
bool notify = false;
- /* Don't keep track of admin state on LAG DSA masters,
- * but rather just of physical DSA masters
+ /* Don't keep track of admin state on LAG DSA conduits,
+ * but rather just of physical DSA conduits
*/
- if (netif_is_lag_master(master))
+ if (netif_is_lag_master(conduit))
return;
- if ((dsa_port_master_is_operational(cpu_dp)) !=
- (up && cpu_dp->master_oper_up))
+ if ((dsa_port_conduit_is_operational(cpu_dp)) !=
+ (up && cpu_dp->conduit_oper_up))
notify = true;
- cpu_dp->master_admin_up = up;
+ cpu_dp->conduit_admin_up = up;
if (notify)
- dsa_tree_master_state_change(dst, master);
+ dsa_tree_conduit_state_change(dst, conduit);
}
-void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
- bool up)
+void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
+ bool up)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
bool notify = false;
- /* Don't keep track of oper state on LAG DSA masters,
- * but rather just of physical DSA masters
+ /* Don't keep track of oper state on LAG DSA conduits,
+ * but rather just of physical DSA conduits
*/
- if (netif_is_lag_master(master))
+ if (netif_is_lag_master(conduit))
return;
- if ((dsa_port_master_is_operational(cpu_dp)) !=
- (cpu_dp->master_admin_up && up))
+ if ((dsa_port_conduit_is_operational(cpu_dp)) !=
+ (cpu_dp->conduit_admin_up && up))
notify = true;
- cpu_dp->master_oper_up = up;
+ cpu_dp->conduit_oper_up = up;
if (notify)
- dsa_tree_master_state_change(dst, master);
+ dsa_tree_conduit_state_change(dst, conduit);
}
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
@@ -1083,7 +1109,7 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
mutex_init(&dp->vlans_lock);
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
- INIT_LIST_HEAD(&dp->vlans);
+ INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
INIT_LIST_HEAD(&dp->list);
list_add_tail(&dp->list, &dst->ports);
@@ -1106,7 +1132,7 @@ static int dsa_port_parse_dsa(struct dsa_port *dp)
}
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
- struct net_device *master)
+ struct net_device *conduit)
{
enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
struct dsa_switch *mds, *ds = dp->ds;
@@ -1117,21 +1143,21 @@ static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
* happens the switch driver may want to know if its tagging protocol
* is going to work in such a configuration.
*/
- if (dsa_slave_dev_check(master)) {
- mdp = dsa_slave_to_port(master);
+ if (dsa_user_dev_check(conduit)) {
+ mdp = dsa_user_to_port(conduit);
mds = mdp->ds;
mdp_upstream = dsa_upstream_port(mds, mdp->index);
tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
DSA_TAG_PROTO_NONE);
}
- /* If the master device is not itself a DSA slave in a disjoint DSA
+ /* If the conduit device is not itself a DSA user in a disjoint DSA
* tree, then return immediately.
*/
return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
-static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
+static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
const char *user_protocol)
{
const struct dsa_device_ops *tag_ops = NULL;
@@ -1140,7 +1166,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
enum dsa_tag_protocol default_proto;
/* Find out which protocol the switch would prefer. */
- default_proto = dsa_get_tag_protocol(dp, master);
+ default_proto = dsa_get_tag_protocol(dp, conduit);
if (dst->default_proto) {
if (dst->default_proto != default_proto) {
dev_err(ds->dev,
@@ -1195,7 +1221,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
dst->tag_ops = tag_ops;
}
- dp->master = master;
+ dp->conduit = conduit;
dp->type = DSA_PORT_TYPE_CPU;
dsa_port_set_tag_protocol(dp, dst->tag_ops);
dp->dst = dst;
@@ -1225,16 +1251,16 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
dp->dn = dn;
if (ethernet) {
- struct net_device *master;
+ struct net_device *conduit;
const char *user_protocol;
- master = of_find_net_device_by_node(ethernet);
+ conduit = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
- if (!master)
+ if (!conduit)
return -EPROBE_DEFER;
user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
- return dsa_port_parse_cpu(dp, master, user_protocol);
+ return dsa_port_parse_cpu(dp, conduit, user_protocol);
}
if (link)
@@ -1349,7 +1375,7 @@ static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
return dsa_switch_parse_ports_of(ds, dn);
}
-static int dev_is_class(struct device *dev, void *class)
+static int dev_is_class(struct device *dev, const void *class)
{
if (dev->class != NULL && !strcmp(dev->class->name, class))
return 1;
@@ -1389,15 +1415,15 @@ static int dsa_port_parse(struct dsa_port *dp, const char *name,
struct device *dev)
{
if (!strcmp(name, "cpu")) {
- struct net_device *master;
+ struct net_device *conduit;
- master = dsa_dev_to_net_device(dev);
- if (!master)
+ conduit = dsa_dev_to_net_device(dev);
+ if (!conduit)
return -EPROBE_DEFER;
- dev_put(master);
+ dev_put(conduit);
- return dsa_port_parse_cpu(dp, master, NULL);
+ return dsa_port_parse_cpu(dp, conduit, NULL);
}
if (!strcmp(name, "dsa"))
@@ -1460,12 +1486,44 @@ static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
static void dsa_switch_release_ports(struct dsa_switch *ds)
{
+ struct dsa_mac_addr *a, *tmp;
struct dsa_port *dp, *next;
+ struct dsa_vlan *v, *n;
dsa_switch_for_each_port_safe(dp, next, ds) {
- WARN_ON(!list_empty(&dp->fdbs));
- WARN_ON(!list_empty(&dp->mdbs));
- WARN_ON(!list_empty(&dp->vlans));
+ /* These are either entries that upper layers lost track of
+ * (probably due to bugs), or installed through interfaces
+ * where one does not necessarily have to remove them, like
+ * ndo_dflt_fdb_add().
+ */
+ list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
+ dev_info(ds->dev,
+ "Cleaning up unicast address %pM vid %u from port %d\n",
+ a->addr, a->vid, dp->index);
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
+ dev_info(ds->dev,
+ "Cleaning up multicast address %pM vid %u from port %d\n",
+ a->addr, a->vid, dp->index);
+ list_del(&a->list);
+ kfree(a);
+ }
+
+ /* These are entries that upper layers have lost track of,
+ * probably due to bugs, but also due to dsa_port_do_vlan_del()
+ * having failed and the VLAN entry still lingering on.
+ */
+ list_for_each_entry_safe(v, n, &dp->vlans, list) {
+ dev_info(ds->dev,
+ "Cleaning up vid %u from port %d\n",
+ v->vid, dp->index);
+ list_del(&v->list);
+ kfree(v);
+ }
+
list_del(&dp->list);
kfree(dp);
}
@@ -1543,14 +1601,15 @@ void dsa_unregister_switch(struct dsa_switch *ds)
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
-/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
+/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is
* blocking that operation from completion, due to the dev_hold taken inside
- * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
- * the DSA master, so that the system can reboot successfully.
+ * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of
+ * the DSA conduit, so that the system can reboot successfully.
*/
void dsa_switch_shutdown(struct dsa_switch *ds)
{
- struct net_device *master, *slave_dev;
+ struct net_device *conduit, *user_dev;
+ LIST_HEAD(close_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@@ -1560,18 +1619,24 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
rtnl_lock();
+ dsa_switch_for_each_cpu_port(dp, ds)
+ list_add(&dp->conduit->close_list, &close_list);
+
+ netif_close_many(&close_list, true);
+
dsa_switch_for_each_user_port(dp, ds) {
- master = dsa_port_to_master(dp);
- slave_dev = dp->slave;
+ conduit = dsa_port_to_conduit(dp);
+ user_dev = dp->user;
- netdev_upper_dev_unlink(master, slave_dev);
+ netif_device_detach(user_dev);
+ netdev_upper_dev_unlink(conduit, user_dev);
}
- /* Disconnect from further netdevice notifiers on the master,
+ /* Disconnect from further netdevice notifiers on the conduit,
* since netdev_uses_dsa() will now return false.
*/
dsa_switch_for_each_cpu_port(dp, ds)
- dp->master->dsa_ptr = NULL;
+ dp->conduit->dsa_ptr = NULL;
rtnl_unlock();
out:
@@ -1582,7 +1647,7 @@ EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
- return dp->type == DSA_PORT_TYPE_USER && dp->slave;
+ return dp->type == DSA_PORT_TYPE_USER && dp->user;
}
int dsa_switch_suspend(struct dsa_switch *ds)
@@ -1590,12 +1655,12 @@ int dsa_switch_suspend(struct dsa_switch *ds)
struct dsa_port *dp;
int ret = 0;
- /* Suspend slave network devices */
+ /* Suspend user network devices */
dsa_switch_for_each_port(dp, ds) {
if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_suspend(dp->slave);
+ ret = dsa_user_suspend(dp->user);
if (ret)
return ret;
}
@@ -1618,12 +1683,12 @@ int dsa_switch_resume(struct dsa_switch *ds)
if (ret)
return ret;
- /* Resume slave network devices */
+ /* Resume user network devices */
dsa_switch_for_each_port(dp, ds) {
if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_resume(dp->slave);
+ ret = dsa_user_resume(dp->user);
if (ret)
return ret;
}
@@ -1635,10 +1700,10 @@ EXPORT_SYMBOL_GPL(dsa_switch_resume);
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
- if (!netdev || !dsa_slave_dev_check(netdev))
+ if (!netdev || !dsa_user_dev_check(netdev))
return ERR_PTR(-ENODEV);
- return dsa_slave_to_port(netdev);
+ return dsa_user_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
@@ -1702,6 +1767,84 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
+/* Helpers for switches without specific HSR offloads, but which can implement
+ * NETIF_F_HW_HSR_DUP because their tagger uses dsa_xmit_port_mask()
+ */
+int dsa_port_simple_hsr_validate(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ enum hsr_port_type type;
+ int err;
+
+ err = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
+ if (err)
+ return err;
+
+ if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only HSR slave ports can be offloaded");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_validate);
+
+int dsa_port_simple_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ int err;
+
+ err = dsa_port_simple_hsr_validate(ds, port, hsr, extack);
+ if (err)
+ return err;
+
+ dsa_hsr_foreach_port(other_dp, ds, hsr) {
+ if (other_dp != dp) {
+ dp->user->features |= NETIF_F_HW_HSR_DUP;
+ other_dp->user->features |= NETIF_F_HW_HSR_DUP;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_join);
+
+int dsa_port_simple_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+
+ dsa_hsr_foreach_port(other_dp, ds, hsr) {
+ if (other_dp != dp) {
+ dp->user->features &= ~NETIF_F_HW_HSR_DUP;
+ other_dp->user->features &= ~NETIF_F_HW_HSR_DUP;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_leave);
+
+static const struct dsa_stubs __dsa_stubs = {
+ .conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
+};
+
+static void dsa_register_stubs(void)
+{
+ dsa_stubs = &__dsa_stubs;
+}
+
+static void dsa_unregister_stubs(void)
+{
+ dsa_stubs = NULL;
+}
+
static int __init dsa_init_module(void)
{
int rc;
@@ -1711,7 +1854,7 @@ static int __init dsa_init_module(void)
if (!dsa_owq)
return -ENOMEM;
- rc = dsa_slave_register_notifier();
+ rc = dsa_user_register_notifier();
if (rc)
goto register_notifier_fail;
@@ -1721,10 +1864,12 @@ static int __init dsa_init_module(void)
if (rc)
goto netlink_register_fail;
+ dsa_register_stubs();
+
return 0;
netlink_register_fail:
- dsa_slave_unregister_notifier();
+ dsa_user_unregister_notifier();
dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
destroy_workqueue(dsa_owq);
@@ -1735,9 +1880,11 @@ module_init(dsa_init_module);
static void __exit dsa_cleanup_module(void)
{
+ dsa_unregister_stubs();
+
rtnl_link_unregister(&dsa_link_ops);
- dsa_slave_unregister_notifier();
+ dsa_user_unregister_notifier();
dev_remove_pack(&dsa_pack_type);
destroy_workqueue(dsa_owq);
}
@@ -1747,3 +1894,4 @@ MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/net/dsa/dsa.h b/net/dsa/dsa.h
index b7e17ae1094d..3cc7823e9ef3 100644
--- a/net/dsa/dsa.h
+++ b/net/dsa/dsa.h
@@ -21,16 +21,16 @@ void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev);
-struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst);
+struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
const struct dsa_device_ops *tag_ops,
const struct dsa_device_ops *old_tag_ops);
-void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
+void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
+ bool up);
+void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
bool up);
-void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
- bool up);
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev,
unsigned int bridge_num);
diff --git a/net/dsa/master.h b/net/dsa/master.h
deleted file mode 100644
index 3fc0e610b5b5..000000000000
--- a/net/dsa/master.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef __DSA_MASTER_H
-#define __DSA_MASTER_H
-
-struct dsa_port;
-struct net_device;
-struct netdev_lag_upper_info;
-struct netlink_ext_ack;
-
-int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
-void dsa_master_teardown(struct net_device *dev);
-int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
- struct netdev_lag_upper_info *uinfo,
- struct netlink_ext_ack *extack);
-void dsa_master_lag_teardown(struct net_device *lag_dev,
- struct dsa_port *cpu_dp);
-
-#endif
diff --git a/net/dsa/netlink.c b/net/dsa/netlink.c
index bd4bbaf851de..1332e56349e5 100644
--- a/net/dsa/netlink.c
+++ b/net/dsa/netlink.c
@@ -5,10 +5,10 @@
#include <net/rtnetlink.h>
#include "netlink.h"
-#include "slave.h"
+#include "user.h"
static const struct nla_policy dsa_policy[IFLA_DSA_MAX + 1] = {
- [IFLA_DSA_MASTER] = { .type = NLA_U32 },
+ [IFLA_DSA_CONDUIT] = { .type = NLA_U32 },
};
static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -20,15 +20,15 @@ static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
if (!data)
return 0;
- if (data[IFLA_DSA_MASTER]) {
- u32 ifindex = nla_get_u32(data[IFLA_DSA_MASTER]);
- struct net_device *master;
+ if (data[IFLA_DSA_CONDUIT]) {
+ u32 ifindex = nla_get_u32(data[IFLA_DSA_CONDUIT]);
+ struct net_device *conduit;
- master = __dev_get_by_index(dev_net(dev), ifindex);
- if (!master)
+ conduit = __dev_get_by_index(dev_net(dev), ifindex);
+ if (!conduit)
return -EINVAL;
- err = dsa_slave_change_master(dev, master, extack);
+ err = dsa_user_change_conduit(dev, conduit, extack);
if (err)
return err;
}
@@ -38,15 +38,15 @@ static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
static size_t dsa_get_size(const struct net_device *dev)
{
- return nla_total_size(sizeof(u32)) + /* IFLA_DSA_MASTER */
+ return nla_total_size(sizeof(u32)) + /* IFLA_DSA_CONDUIT */
0;
}
static int dsa_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
- struct net_device *master = dsa_slave_to_master(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
- if (nla_put_u32(skb, IFLA_DSA_MASTER, master->ifindex))
+ if (nla_put_u32(skb, IFLA_DSA_CONDUIT, conduit->ifindex))
return -EMSGSIZE;
return 0;
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 67ad1adec2a2..ca3a7f52229b 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -14,9 +14,9 @@
#include "dsa.h"
#include "port.h"
-#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"
+#include "user.h"
/**
* dsa_port_notify - Notify the switching fabric of changes to a port
@@ -114,19 +114,17 @@ static bool dsa_port_can_configure_learning(struct dsa_port *dp)
return !err;
}
-bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr)
+bool dsa_port_supports_hwtstamp(struct dsa_port *dp)
{
+ struct kernel_hwtstamp_config config = {};
struct dsa_switch *ds = dp->ds;
int err;
if (!ds->ops->port_hwtstamp_get || !ds->ops->port_hwtstamp_set)
return false;
- /* "See through" shim implementations of the "get" method.
- * This will clobber the ifreq structure, but we will either return an
- * error, or the master will overwrite it with proper values.
- */
- err = ds->ops->port_hwtstamp_get(ds, dp->index, ifr);
+ /* "See through" shim implementations of the "get" method. */
+ err = ds->ops->port_hwtstamp_get(ds, dp->index, &config);
return err != -EOPNOTSUPP;
}
@@ -287,7 +285,7 @@ static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
}
/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
- * event for changing vlan_filtering setting upon slave ports leaving
+ * event for changing vlan_filtering setting upon user ports leaving
* it. That is a good thing, because that lets us handle it and also
* handle the case where the switch's vlan_filtering setting is global
* (not per port). When that happens, the correct moment to trigger the
@@ -487,7 +485,7 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
.dp = dp,
.extack = extack,
};
- struct net_device *dev = dp->slave;
+ struct net_device *dev = dp->user;
struct net_device *brport_dev;
int err;
@@ -512,8 +510,8 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
err = switchdev_bridge_port_offload(brport_dev, dev, dp,
- &dsa_slave_switchdev_notifier,
- &dsa_slave_switchdev_blocking_notifier,
+ &dsa_user_switchdev_notifier,
+ &dsa_user_switchdev_blocking_notifier,
dp->bridge->tx_fwd_offload, extack);
if (err)
goto out_rollback_unbridge;
@@ -526,8 +524,8 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
out_rollback_unoffload:
switchdev_bridge_port_unoffload(brport_dev, dp,
- &dsa_slave_switchdev_notifier,
- &dsa_slave_switchdev_blocking_notifier);
+ &dsa_user_switchdev_notifier,
+ &dsa_user_switchdev_blocking_notifier);
dsa_flush_workqueue();
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
@@ -545,8 +543,8 @@ void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
return;
switchdev_bridge_port_unoffload(brport_dev, dp,
- &dsa_slave_switchdev_notifier,
- &dsa_slave_switchdev_blocking_notifier);
+ &dsa_user_switchdev_notifier,
+ &dsa_user_switchdev_blocking_notifier);
dsa_flush_workqueue();
}
@@ -739,10 +737,10 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
*/
if (vlan_filtering && dsa_port_is_user(dp)) {
struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct net_device *upper_dev, *slave = dp->slave;
+ struct net_device *upper_dev, *user = dp->user;
struct list_head *iter;
- netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
struct bridge_vlan_info br_info;
u16 vid;
@@ -801,9 +799,9 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
if (!ds->ops->port_vlan_filtering)
return -EOPNOTSUPP;
- /* We are called from dsa_slave_switchdev_blocking_event(),
+ /* We are called from dsa_user_switchdev_blocking_event(),
* which is not under rcu_read_lock(), unlike
- * dsa_slave_switchdev_event().
+ * dsa_user_switchdev_event().
*/
rcu_read_lock();
apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
@@ -825,24 +823,24 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
ds->vlan_filtering = vlan_filtering;
dsa_switch_for_each_user_port(other_dp, ds) {
- struct net_device *slave = other_dp->slave;
+ struct net_device *user = other_dp->user;
/* We might be called in the unbind path, so not
- * all slave devices might still be registered.
+ * all user devices might still be registered.
*/
- if (!slave)
+ if (!user)
continue;
- err = dsa_slave_manage_vlan_filtering(slave,
- vlan_filtering);
+ err = dsa_user_manage_vlan_filtering(user,
+ vlan_filtering);
if (err)
goto restore;
}
} else {
dp->vlan_filtering = vlan_filtering;
- err = dsa_slave_manage_vlan_filtering(dp->slave,
- vlan_filtering);
+ err = dsa_user_manage_vlan_filtering(dp->user,
+ vlan_filtering);
if (err)
goto restore;
}
@@ -861,7 +859,7 @@ restore:
}
/* This enforces legacy behavior for switch drivers which assume they can't
- * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
+ * receive VLAN configuration when joining a bridge with vlan_filtering=0
*/
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
@@ -1028,9 +1026,6 @@ static int dsa_port_host_fdb_add(struct dsa_port *dp,
.db = db,
};
- if (!dp->ds->fdb_isolation)
- info.db.bridge.num = 0;
-
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}
@@ -1048,19 +1043,22 @@ int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
- /* Avoid a call to __dev_set_promiscuity() on the master, which
+ if (!dp->ds->fdb_isolation)
+ db.bridge.num = 0;
+
+ /* Avoid a call to __dev_set_promiscuity() on the conduit, which
* requires rtnl_lock(), since we can't guarantee that is held here,
* and we can't take it either.
*/
- if (master->priv_flags & IFF_UNICAST_FLT) {
- err = dev_uc_add(master, addr);
+ if (conduit->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_add(conduit, addr);
if (err)
return err;
}
@@ -1079,9 +1077,6 @@ static int dsa_port_host_fdb_del(struct dsa_port *dp,
.db = db,
};
- if (!dp->ds->fdb_isolation)
- info.db.bridge.num = 0;
-
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}
@@ -1099,15 +1094,18 @@ int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
- if (master->priv_flags & IFF_UNICAST_FLT) {
- err = dev_uc_del(master, addr);
+ if (!dp->ds->fdb_isolation)
+ db.bridge.num = 0;
+
+ if (conduit->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_del(conduit, addr);
if (err)
return err;
}
@@ -1210,9 +1208,6 @@ static int dsa_port_host_mdb_add(const struct dsa_port *dp,
.db = db,
};
- if (!dp->ds->fdb_isolation)
- info.db.bridge.num = 0;
-
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}
@@ -1230,14 +1225,17 @@ int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
- err = dev_mc_add(master, mdb->addr);
+ if (!dp->ds->fdb_isolation)
+ db.bridge.num = 0;
+
+ err = dev_mc_add(conduit, mdb->addr);
if (err)
return err;
@@ -1254,9 +1252,6 @@ static int dsa_port_host_mdb_del(const struct dsa_port *dp,
.db = db,
};
- if (!dp->ds->fdb_isolation)
- info.db.bridge.num = 0;
-
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}
@@ -1274,14 +1269,17 @@ int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
};
int err;
- err = dev_mc_del(master, mdb->addr);
+ if (!dp->ds->fdb_isolation)
+ db.bridge.num = 0;
+
+ err = dev_mc_del(conduit, mdb->addr);
if (err)
return err;
@@ -1316,7 +1314,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
@@ -1328,7 +1326,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
if (err && err != -EOPNOTSUPP)
return err;
- vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);
+ vlan_vid_add(conduit, htons(ETH_P_8021Q), vlan->vid);
return err;
}
@@ -1336,7 +1334,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
int dsa_port_host_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
@@ -1347,7 +1345,7 @@ int dsa_port_host_vlan_del(struct dsa_port *dp,
if (err && err != -EOPNOTSUPP)
return err;
- vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
+ vlan_vid_del(conduit, htons(ETH_P_8021Q), vlan->vid);
return err;
}
@@ -1396,24 +1394,24 @@ int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}
-static int dsa_port_assign_master(struct dsa_port *dp,
- struct net_device *master,
- struct netlink_ext_ack *extack,
- bool fail_on_err)
+static int dsa_port_assign_conduit(struct dsa_port *dp,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack,
+ bool fail_on_err)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index, err;
- err = ds->ops->port_change_master(ds, port, master, extack);
+ err = ds->ops->port_change_conduit(ds, port, conduit, extack);
if (err && !fail_on_err)
- dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
- port, master->name, ERR_PTR(err));
+ dev_err(ds->dev, "port %d failed to assign conduit %s: %pe\n",
+ port, conduit->name, ERR_PTR(err));
if (err && fail_on_err)
return err;
- dp->cpu_dp = master->dsa_ptr;
- dp->cpu_port_in_lag = netif_is_lag_master(master);
+ dp->cpu_dp = conduit->dsa_ptr;
+ dp->cpu_port_in_lag = netif_is_lag_master(conduit);
return 0;
}
@@ -1426,12 +1424,12 @@ static int dsa_port_assign_master(struct dsa_port *dp,
* the old CPU port before changing it, and restore it on errors during the
* bringup of the new one.
*/
-int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
- struct netlink_ext_ack *extack)
+int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit,
+ struct netlink_ext_ack *extack)
{
struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
- struct net_device *old_master = dsa_port_to_master(dp);
- struct net_device *dev = dp->slave;
+ struct net_device *old_conduit = dsa_port_to_conduit(dp);
+ struct net_device *dev = dp->user;
struct dsa_switch *ds = dp->ds;
bool vlan_filtering;
int err, tmp;
@@ -1452,7 +1450,7 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
*/
vlan_filtering = dsa_port_is_vlan_filtering(dp);
if (vlan_filtering) {
- err = dsa_slave_manage_vlan_filtering(dev, false);
+ err = dsa_user_manage_vlan_filtering(dev, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to remove standalone VLANs");
@@ -1463,16 +1461,40 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
/* Standalone addresses, and addresses of upper interfaces like
* VLAN, LAG, HSR need to be migrated.
*/
- dsa_slave_unsync_ha(dev);
+ dsa_user_unsync_ha(dev);
+
+ /* If live-changing, we also need to uninstall the user device address
+ * from the port FDB and the conduit interface.
+ */
+ if (dev->flags & IFF_UP)
+ dsa_user_host_uc_uninstall(dev);
- err = dsa_port_assign_master(dp, master, extack, true);
+ err = dsa_port_assign_conduit(dp, conduit, extack, true);
if (err)
goto rewind_old_addrs;
- dsa_slave_sync_ha(dev);
+ /* If the port doesn't have its own MAC address and relies on the DSA
+ * conduit's one, inherit it again from the new DSA conduit.
+ */
+ if (is_zero_ether_addr(dp->mac))
+ eth_hw_addr_inherit(dev, conduit);
+
+ /* If live-changing, we need to install the user device address to the
+ * port FDB and the conduit interface.
+ */
+ if (dev->flags & IFF_UP) {
+ err = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to install host UC address");
+ goto rewind_addr_inherit;
+ }
+ }
+
+ dsa_user_sync_ha(dev);
if (vlan_filtering) {
- err = dsa_slave_manage_vlan_filtering(dev, true);
+ err = dsa_user_manage_vlan_filtering(dev, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to restore standalone VLANs");
@@ -1493,19 +1515,35 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
rewind_new_vlan:
if (vlan_filtering)
- dsa_slave_manage_vlan_filtering(dev, false);
+ dsa_user_manage_vlan_filtering(dev, false);
rewind_new_addrs:
- dsa_slave_unsync_ha(dev);
+ dsa_user_unsync_ha(dev);
+
+ if (dev->flags & IFF_UP)
+ dsa_user_host_uc_uninstall(dev);
+
+rewind_addr_inherit:
+ if (is_zero_ether_addr(dp->mac))
+ eth_hw_addr_inherit(dev, old_conduit);
- dsa_port_assign_master(dp, old_master, NULL, false);
+ dsa_port_assign_conduit(dp, old_conduit, NULL, false);
/* Restore the objects on the old CPU port */
rewind_old_addrs:
- dsa_slave_sync_ha(dev);
+ if (dev->flags & IFF_UP) {
+ tmp = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (tmp) {
+ dev_err(ds->dev,
+ "port %d failed to restore host UC address: %pe\n",
+ dp->index, ERR_PTR(tmp));
+ }
+ }
+
+ dsa_user_sync_ha(dev);
if (vlan_filtering) {
- tmp = dsa_slave_manage_vlan_filtering(dev, true);
+ tmp = dsa_user_manage_vlan_filtering(dev, true);
if (tmp) {
dev_err(ds->dev,
"port %d failed to restore standalone VLANs: %pe\n",
@@ -1533,116 +1571,32 @@ void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
cpu_dp->tag_ops = tag_ops;
}
-static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
-{
- struct device_node *phy_dn;
- struct phy_device *phydev;
-
- phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
- if (!phy_dn)
- return NULL;
-
- phydev = of_phy_find_device(phy_dn);
- if (!phydev) {
- of_node_put(phy_dn);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- of_node_put(phy_dn);
- return phydev;
-}
-
-static void dsa_port_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- /* Skip call for drivers which don't yet set mac_capabilities,
- * since validating in that case would mean their PHY will advertise
- * nothing. In turn, skipping validation makes them advertise
- * everything that the PHY supports, so those drivers should be
- * converted ASAP.
- */
- if (config->mac_capabilities)
- phylink_generic_validate(config, supported, state);
-}
-
-static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
- int err;
-
- /* Only called for inband modes */
- if (!ds->ops->phylink_mac_link_state) {
- state->link = 0;
- return;
- }
-
- err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
- if (err < 0) {
- dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
- dp->index, err);
- state->link = 0;
- }
-}
-
-static struct phylink_pcs *
-dsa_port_phylink_mac_select_pcs(struct phylink_config *config,
- phy_interface_t interface)
+/* dsa_supports_eee - indicate that EEE is supported
+ * @ds: pointer to &struct dsa_switch
+ * @port: port index
+ *
+ * A default implementation for the .support_eee() DSA operations member,
+ * which drivers can use to indicate that they support EEE on all of their
+ * user ports.
+ *
+ * Returns: true
+ */
+bool dsa_supports_eee(struct dsa_switch *ds, int port)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
- struct dsa_switch *ds = dp->ds;
-
- if (ds->ops->phylink_mac_select_pcs)
- pcs = ds->ops->phylink_mac_select_pcs(ds, dp->index, interface);
-
- return pcs;
+ return true;
}
+EXPORT_SYMBOL_GPL(dsa_supports_eee);
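
dsa_supports_eee() is meant to be dropped straight into a driver's ops table; a sketch of the hook-up, with everything else elided ("foo_switch_ops" is a placeholder):

static const struct dsa_switch_ops foo_switch_ops = {
	/* ... the driver's other callbacks ... */

	/* EEE works on every user port, so the generic helper suffices */
	.support_eee		= dsa_supports_eee,
};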
static void dsa_port_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_config)
- return;
-
- ds->ops->phylink_mac_config(ds, dp->index, mode, state);
-}
-
-static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
-{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_an_restart)
- return;
-
- ds->ops->phylink_mac_an_restart(ds, dp->index);
}
static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct phy_device *phydev = NULL;
- struct dsa_switch *ds = dp->ds;
-
- if (dsa_port_is_user(dp))
- phydev = dp->slave->phydev;
-
- if (!ds->ops->phylink_mac_link_down) {
- if (ds->ops->adjust_link && phydev)
- ds->ops->adjust_link(ds, dp->index, phydev);
- return;
- }
-
- ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
@@ -1652,31 +1606,17 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->phylink_mac_link_up) {
- if (ds->ops->adjust_link && phydev)
- ds->ops->adjust_link(ds, dp->index, phydev);
- return;
- }
-
- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
- speed, duplex, tx_pause, rx_pause);
}
static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
- .validate = dsa_port_phylink_validate,
- .mac_select_pcs = dsa_port_phylink_mac_select_pcs,
- .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
.mac_config = dsa_port_phylink_mac_config,
- .mac_an_restart = dsa_port_phylink_mac_an_restart,
.mac_link_down = dsa_port_phylink_mac_link_down,
.mac_link_up = dsa_port_phylink_mac_link_up,
};
int dsa_port_phylink_create(struct dsa_port *dp)
{
+ const struct phylink_mac_ops *mac_ops;
struct dsa_switch *ds = dp->ds;
phy_interface_t mode;
struct phylink *pl;
@@ -1686,18 +1626,26 @@ int dsa_port_phylink_create(struct dsa_port *dp)
if (err)
mode = PHY_INTERFACE_MODE_NA;
- /* Presence of phylink_mac_link_state or phylink_mac_an_restart is
- * an indicator of a legacy phylink driver.
- */
- if (ds->ops->phylink_mac_link_state ||
- ds->ops->phylink_mac_an_restart)
- dp->pl_config.legacy_pre_march2020 = true;
-
- if (ds->ops->phylink_get_caps)
+ if (ds->ops->phylink_get_caps) {
ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
+ } else {
+ /* For legacy drivers */
+ if (mode != PHY_INTERFACE_MODE_NA) {
+ __set_bit(mode, dp->pl_config.supported_interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ dp->pl_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ dp->pl_config.supported_interfaces);
+ }
+ }
- pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
- mode, &dsa_port_phylink_mac_ops);
+ mac_ops = &dsa_port_phylink_mac_ops;
+ if (ds->phylink_mac_ops)
+ mac_ops = ds->phylink_mac_ops;
+
+ pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), mode,
+ mac_ops);
if (IS_ERR(pl)) {
pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl));
return PTR_ERR(pl);
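
With the per-switch phylink callbacks reduced to empty stubs above, a driver that wants real MAC handling supplies its own struct phylink_mac_ops through ds->phylink_mac_ops before registering the switch. A rough sketch, recovering the dsa_port from the phylink_config via container_of() on pl_config exactly as the removed core code did ("foo" names are placeholders):

#include <linux/phylink.h>
#include <net/dsa.h>

static void foo_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	/* program the port MAC for state->interface here */
}

static void foo_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	/* force the link down in hardware here */
}

static void foo_mac_link_up(struct phylink_config *config,
			    struct phy_device *phydev, unsigned int mode,
			    phy_interface_t interface, int speed, int duplex,
			    bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);

	dev_dbg(dp->ds->dev, "port %d link up at %d Mbps\n", dp->index, speed);
}

static const struct phylink_mac_ops foo_phylink_mac_ops = {
	.mac_config	= foo_mac_config,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
};

/* in the probe path, before dsa_register_switch():
 *	ds->phylink_mac_ops = &foo_phylink_mac_ops;
 */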
@@ -1714,78 +1662,6 @@ void dsa_port_phylink_destroy(struct dsa_port *dp)
dp->pl = NULL;
}
-static int dsa_shared_port_setup_phy_of(struct dsa_port *dp, bool enable)
-{
- struct dsa_switch *ds = dp->ds;
- struct phy_device *phydev;
- int port = dp->index;
- int err = 0;
-
- phydev = dsa_port_get_phy_device(dp);
- if (!phydev)
- return 0;
-
- if (IS_ERR(phydev))
- return PTR_ERR(phydev);
-
- if (enable) {
- err = genphy_resume(phydev);
- if (err < 0)
- goto err_put_dev;
-
- err = genphy_read_status(phydev);
- if (err < 0)
- goto err_put_dev;
- } else {
- err = genphy_suspend(phydev);
- if (err < 0)
- goto err_put_dev;
- }
-
- if (ds->ops->adjust_link)
- ds->ops->adjust_link(ds, port, phydev);
-
- dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));
-
-err_put_dev:
- put_device(&phydev->mdio.dev);
- return err;
-}
-
-static int dsa_shared_port_fixed_link_register_of(struct dsa_port *dp)
-{
- struct device_node *dn = dp->dn;
- struct dsa_switch *ds = dp->ds;
- struct phy_device *phydev;
- int port = dp->index;
- phy_interface_t mode;
- int err;
-
- err = of_phy_register_fixed_link(dn);
- if (err) {
- dev_err(ds->dev,
- "failed to register the fixed PHY of port %d\n",
- port);
- return err;
- }
-
- phydev = of_phy_find_device(dn);
-
- err = of_get_phy_mode(dn, &mode);
- if (err)
- mode = PHY_INTERFACE_MODE_NA;
- phydev->interface = mode;
-
- genphy_read_status(phydev);
-
- if (ds->ops->adjust_link)
- ds->ops->adjust_link(ds, port, phydev);
-
- put_device(&phydev->mdio.dev);
-
- return 0;
-}
-
static int dsa_shared_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
@@ -1819,7 +1695,7 @@ err_phy_connect:
* their type.
*
* User ports with no phy-handle or fixed-link are expected to connect to an
- * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
+ * internal PHY located on the ds->user_mii_bus at an MDIO address equal to
* the port number. This description is still actively supported.
*
* Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
@@ -1840,7 +1716,7 @@ err_phy_connect:
* a fixed-link, a phy-handle, or a managed = "in-band-status" property.
* It becomes the responsibility of the driver to ensure that these ports
* operate at the maximum speed (whatever this means) and will interoperate
- * with the DSA master or other cascade port, since phylink methods will not be
+ * with the DSA conduit or other cascade port, since phylink methods will not be
* invoked for them.
*
* If you are considering expanding this table for newly introduced switches,
@@ -1963,12 +1839,20 @@ static void dsa_shared_port_validate_of(struct dsa_port *dp,
dn, dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
}
+static void dsa_shared_port_link_down(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->phylink_mac_ops && ds->phylink_mac_ops->mac_link_down)
+ ds->phylink_mac_ops->mac_link_down(&dp->pl_config, MLO_AN_FIXED,
+ PHY_INTERFACE_MODE_NA);
+}
+
int dsa_shared_port_link_register_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
bool missing_link_description;
bool missing_phy_mode;
- int port = dp->index;
dsa_shared_port_validate_of(dp, &missing_phy_mode,
&missing_link_description);
@@ -1978,49 +1862,32 @@ int dsa_shared_port_link_register_of(struct dsa_port *dp)
dsa_switches_apply_workarounds))
return -EINVAL;
- if (!ds->ops->adjust_link) {
- if (missing_link_description) {
- dev_warn(ds->dev,
- "Skipping phylink registration for %s port %d\n",
- dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
- } else {
- if (ds->ops->phylink_mac_link_down)
- ds->ops->phylink_mac_link_down(ds, port,
- MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+ if (missing_link_description) {
+ dev_warn(ds->dev,
+ "Skipping phylink registration for %s port %d\n",
+ dsa_port_is_cpu(dp) ? "CPU" : "DSA", dp->index);
+ } else {
+ dsa_shared_port_link_down(dp);
- return dsa_shared_port_phylink_register(dp);
- }
- return 0;
+ return dsa_shared_port_phylink_register(dp);
}
- dev_warn(ds->dev,
- "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
-
- if (of_phy_is_fixed_link(dp->dn))
- return dsa_shared_port_fixed_link_register_of(dp);
- else
- return dsa_shared_port_setup_phy_of(dp, true);
+ return 0;
}
void dsa_shared_port_link_unregister_of(struct dsa_port *dp)
{
- struct dsa_switch *ds = dp->ds;
-
- if (!ds->ops->adjust_link && dp->pl) {
+ if (dp->pl) {
rtnl_lock();
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(dp);
return;
}
-
- if (of_phy_is_fixed_link(dp->dn))
- of_phy_deregister_fixed_link(dp->dn);
- else
- dsa_shared_port_setup_phy_of(dp, false);
}
-int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
+int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr,
+ struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dp->ds;
int err;
@@ -2030,7 +1897,7 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
dp->hsr_dev = hsr;
- err = ds->ops->port_hsr_join(ds, dp->index, hsr);
+ err = ds->ops->port_hsr_join(ds, dp->index, hsr, extack);
if (err)
dp->hsr_dev = NULL;
@@ -2042,6 +1909,9 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
struct dsa_switch *ds = dp->ds;
int err;
+ if (!dp->hsr_dev)
+ return;
+
dp->hsr_dev = NULL;
if (ds->ops->port_hsr_leave) {
diff --git a/net/dsa/port.h b/net/dsa/port.h
index 9c218660d223..6bc3291573c0 100644
--- a/net/dsa/port.h
+++ b/net/dsa/port.h
@@ -15,7 +15,7 @@ struct switchdev_obj_port_mdb;
struct switchdev_vlan_msti;
struct phy_device;
-bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr);
+bool dsa_port_supports_hwtstamp(struct dsa_port *dp);
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
@@ -103,12 +103,13 @@ int dsa_port_phylink_create(struct dsa_port *dp);
void dsa_port_phylink_destroy(struct dsa_port *dp);
int dsa_shared_port_link_register_of(struct dsa_port *dp);
void dsa_shared_port_link_unregister_of(struct dsa_port *dp);
-int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
+int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr,
+ struct netlink_ext_ack *extack);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc);
-int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
- struct netlink_ext_ack *extack);
+int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/net/dsa/slave.h b/net/dsa/slave.h
deleted file mode 100644
index d0abe609e00d..000000000000
--- a/net/dsa/slave.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef __DSA_SLAVE_H
-#define __DSA_SLAVE_H
-
-#include <linux/if_bridge.h>
-#include <linux/if_vlan.h>
-#include <linux/list.h>
-#include <linux/netpoll.h>
-#include <linux/types.h>
-#include <net/dsa.h>
-#include <net/gro_cells.h>
-
-struct net_device;
-struct netlink_ext_ack;
-
-extern struct notifier_block dsa_slave_switchdev_notifier;
-extern struct notifier_block dsa_slave_switchdev_blocking_notifier;
-
-struct dsa_slave_priv {
- /* Copy of CPU port xmit for faster access in slave transmit hot path */
- struct sk_buff * (*xmit)(struct sk_buff *skb,
- struct net_device *dev);
-
- struct gro_cells gcells;
-
- /* DSA port data, such as switch, port index, etc. */
- struct dsa_port *dp;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- struct netpoll *netpoll;
-#endif
-
- /* TC context */
- struct list_head mall_tc_list;
-};
-
-void dsa_slave_mii_bus_init(struct dsa_switch *ds);
-int dsa_slave_create(struct dsa_port *dp);
-void dsa_slave_destroy(struct net_device *slave_dev);
-int dsa_slave_suspend(struct net_device *slave_dev);
-int dsa_slave_resume(struct net_device *slave_dev);
-int dsa_slave_register_notifier(void);
-void dsa_slave_unregister_notifier(void);
-void dsa_slave_sync_ha(struct net_device *dev);
-void dsa_slave_unsync_ha(struct net_device *dev);
-void dsa_slave_setup_tagger(struct net_device *slave);
-int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
-int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
- struct netlink_ext_ack *extack);
-int dsa_slave_manage_vlan_filtering(struct net_device *dev,
- bool vlan_filtering);
-
-static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
-
- return p->dp;
-}
-
-static inline struct net_device *
-dsa_slave_to_master(const struct net_device *dev)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return dsa_port_to_master(dp);
-}
-
-#endif
diff --git a/net/dsa/stubs.c b/net/dsa/stubs.c
new file mode 100644
index 000000000000..2ed8a6c85fbf
--- /dev/null
+++ b/net/dsa/stubs.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Stubs for DSA functionality called by the core network stack.
+ * These are necessary because CONFIG_NET_DSA can be a module, and built-in
+ * code cannot directly call symbols exported by modules.
+ */
+#include <net/dsa_stubs.h>
+
+const struct dsa_stubs *dsa_stubs;
+EXPORT_SYMBOL_GPL(dsa_stubs);
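
The new file only exports the pointer; the pattern it enables is the usual one of built-in shims calling through a pointer that the (possibly modular) DSA core fills in at init time. A deliberately generic illustration of that pattern, not a reproduction of the actual include/net/dsa_stubs.h contents:

/* Inside the DSA core, which may be built as a module: */
static const struct dsa_stubs __dsa_stubs = {
	/* ... pointers to functions implemented in the DSA module ... */
};

static int __init dsa_init_module(void)
{
	dsa_stubs = &__dsa_stubs;

	return 0;
}

static void __exit dsa_exit_module(void)
{
	dsa_stubs = NULL;
}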
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index d5bc4bb7310d..3d2feeea897b 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -15,9 +15,10 @@
#include "dsa.h"
#include "netlink.h"
#include "port.h"
-#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"
+#include "trace.h"
+#include "user.h"
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
@@ -164,14 +165,20 @@ static int dsa_port_do_mdb_add(struct dsa_port *dp,
int err = 0;
/* No need to bother with refcounting for user ports */
- if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_mdb_add(ds, port, mdb, db);
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
+ err = ds->ops->port_mdb_add(ds, port, mdb, db);
+ trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
+
+ return err;
+ }
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
if (a) {
refcount_inc(&a->refcount);
+ trace_dsa_mdb_add_bump(dp, mdb->addr, mdb->vid, &db,
+ &a->refcount);
goto out;
}
@@ -182,6 +189,7 @@ static int dsa_port_do_mdb_add(struct dsa_port *dp,
}
err = ds->ops->port_mdb_add(ds, port, mdb, db);
+ trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
if (err) {
kfree(a);
goto out;
@@ -209,21 +217,30 @@ static int dsa_port_do_mdb_del(struct dsa_port *dp,
int err = 0;
/* No need to bother with refcounting for user ports */
- if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_mdb_del(ds, port, mdb, db);
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
+ err = ds->ops->port_mdb_del(ds, port, mdb, db);
+ trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
+
+ return err;
+ }
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
if (!a) {
+ trace_dsa_mdb_del_not_found(dp, mdb->addr, mdb->vid, &db);
err = -ENOENT;
goto out;
}
- if (!refcount_dec_and_test(&a->refcount))
+ if (!refcount_dec_and_test(&a->refcount)) {
+ trace_dsa_mdb_del_drop(dp, mdb->addr, mdb->vid, &db,
+ &a->refcount);
goto out;
+ }
err = ds->ops->port_mdb_del(ds, port, mdb, db);
+ trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
if (err) {
refcount_set(&a->refcount, 1);
goto out;
@@ -247,14 +264,19 @@ static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
int err = 0;
/* No need to bother with refcounting for user ports */
- if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_fdb_add(ds, port, addr, vid, db);
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
+ err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
+ trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
+
+ return err;
+ }
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
if (a) {
refcount_inc(&a->refcount);
+ trace_dsa_fdb_add_bump(dp, addr, vid, &db, &a->refcount);
goto out;
}
@@ -265,6 +287,7 @@ static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
}
err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
+ trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
if (err) {
kfree(a);
goto out;
@@ -291,21 +314,29 @@ static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
int err = 0;
/* No need to bother with refcounting for user ports */
- if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_fdb_del(ds, port, addr, vid, db);
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
+ err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
+ trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
+
+ return err;
+ }
mutex_lock(&dp->addr_lists_lock);
a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
if (!a) {
+ trace_dsa_fdb_del_not_found(dp, addr, vid, &db);
err = -ENOENT;
goto out;
}
- if (!refcount_dec_and_test(&a->refcount))
+ if (!refcount_dec_and_test(&a->refcount)) {
+ trace_dsa_fdb_del_drop(dp, addr, vid, &db, &a->refcount);
goto out;
+ }
err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
+ trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
if (err) {
refcount_set(&a->refcount, 1);
goto out;
@@ -332,6 +363,8 @@ static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
if (a) {
refcount_inc(&a->refcount);
+ trace_dsa_lag_fdb_add_bump(lag->dev, addr, vid, &db,
+ &a->refcount);
goto out;
}
@@ -342,6 +375,7 @@ static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
}
err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
+ trace_dsa_lag_fdb_add_hw(lag->dev, addr, vid, &db, err);
if (err) {
kfree(a);
goto out;
@@ -370,14 +404,19 @@ static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
if (!a) {
+ trace_dsa_lag_fdb_del_not_found(lag->dev, addr, vid, &db);
err = -ENOENT;
goto out;
}
- if (!refcount_dec_and_test(&a->refcount))
+ if (!refcount_dec_and_test(&a->refcount)) {
+ trace_dsa_lag_fdb_del_drop(lag->dev, addr, vid, &db,
+ &a->refcount);
goto out;
+ }
err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
+ trace_dsa_lag_fdb_del_hw(lag->dev, addr, vid, &db, err);
if (err) {
refcount_set(&a->refcount, 1);
goto out;
@@ -634,8 +673,8 @@ static bool dsa_port_host_vlan_match(struct dsa_port *dp,
return false;
}
-static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
- const struct switchdev_obj_port_vlan *vlan)
+struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_vlan *v;
@@ -656,8 +695,12 @@ static int dsa_port_do_vlan_add(struct dsa_port *dp,
int err = 0;
/* No need to bother with refcounting for user ports. */
- if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_vlan_add(ds, port, vlan, extack);
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
+ err = ds->ops->port_vlan_add(ds, port, vlan, extack);
+ trace_dsa_vlan_add_hw(dp, vlan, err);
+
+ return err;
+ }
/* No need to propagate on shared ports the existing VLANs that were
* re-notified after just the flags have changed. This would cause a
@@ -672,6 +715,7 @@ static int dsa_port_do_vlan_add(struct dsa_port *dp,
v = dsa_vlan_find(&dp->vlans, vlan);
if (v) {
refcount_inc(&v->refcount);
+ trace_dsa_vlan_add_bump(dp, vlan, &v->refcount);
goto out;
}
@@ -682,6 +726,7 @@ static int dsa_port_do_vlan_add(struct dsa_port *dp,
}
err = ds->ops->port_vlan_add(ds, port, vlan, extack);
+ trace_dsa_vlan_add_hw(dp, vlan, err);
if (err) {
kfree(v);
goto out;
@@ -706,21 +751,29 @@ static int dsa_port_do_vlan_del(struct dsa_port *dp,
int err = 0;
/* No need to bother with refcounting for user ports */
- if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
- return ds->ops->port_vlan_del(ds, port, vlan);
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
+ err = ds->ops->port_vlan_del(ds, port, vlan);
+ trace_dsa_vlan_del_hw(dp, vlan, err);
+
+ return err;
+ }
mutex_lock(&dp->vlans_lock);
v = dsa_vlan_find(&dp->vlans, vlan);
if (!v) {
+ trace_dsa_vlan_del_not_found(dp, vlan);
err = -ENOENT;
goto out;
}
- if (!refcount_dec_and_test(&v->refcount))
+ if (!refcount_dec_and_test(&v->refcount)) {
+ trace_dsa_vlan_del_drop(dp, vlan, &v->refcount);
goto out;
+ }
err = ds->ops->port_vlan_del(ds, port, vlan);
+ trace_dsa_vlan_del_hw(dp, vlan, err);
if (err) {
refcount_set(&v->refcount, 1);
goto out;
@@ -841,12 +894,12 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
* bits that depend on the tagger, such as the MTU.
*/
dsa_switch_for_each_user_port(dp, ds) {
- struct net_device *slave = dp->slave;
+ struct net_device *user = dp->user;
- dsa_slave_setup_tagger(slave);
+ dsa_user_setup_tagger(user);
/* rtnl_mutex is held in dsa_tree_change_tag_proto */
- dsa_slave_change_mtu(slave, slave->mtu);
+ dsa_user_change_mtu(user, user->mtu);
}
return 0;
@@ -907,13 +960,13 @@ dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
}
static int
-dsa_switch_master_state_change(struct dsa_switch *ds,
- struct dsa_notifier_master_state_info *info)
+dsa_switch_conduit_state_change(struct dsa_switch *ds,
+ struct dsa_notifier_conduit_state_info *info)
{
- if (!ds->ops->master_state_change)
+ if (!ds->ops->conduit_state_change)
return 0;
- ds->ops->master_state_change(ds, info->master, info->operational);
+ ds->ops->conduit_state_change(ds, info->conduit, info->operational);
return 0;
}
@@ -1003,8 +1056,8 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
err = dsa_switch_tag_8021q_vlan_del(ds, info);
break;
- case DSA_NOTIFIER_MASTER_STATE_CHANGE:
- err = dsa_switch_master_state_change(ds, info);
+ case DSA_NOTIFIER_CONDUIT_STATE_CHANGE:
+ err = dsa_switch_conduit_state_change(ds, info);
break;
default:
err = -EOPNOTSUPP;
diff --git a/net/dsa/switch.h b/net/dsa/switch.h
index 15e67b95eb6e..be0a2749cd97 100644
--- a/net/dsa/switch.h
+++ b/net/dsa/switch.h
@@ -34,7 +34,7 @@ enum {
DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
- DSA_NOTIFIER_MASTER_STATE_CHANGE,
+ DSA_NOTIFIER_CONDUIT_STATE_CHANGE,
};
/* DSA_NOTIFIER_AGEING_TIME */
@@ -105,12 +105,15 @@ struct dsa_notifier_tag_8021q_vlan_info {
u16 vid;
};
-/* DSA_NOTIFIER_MASTER_STATE_CHANGE */
-struct dsa_notifier_master_state_info {
- const struct net_device *master;
+/* DSA_NOTIFIER_CONDUIT_STATE_CHANGE */
+struct dsa_notifier_conduit_state_info {
+ const struct net_device *conduit;
bool operational;
};
+struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
+ const struct switchdev_obj_port_vlan *vlan);
+
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
diff --git a/net/dsa/tag.c b/net/dsa/tag.c
index b2fba1a003ce..79ad105902d9 100644
--- a/net/dsa/tag.c
+++ b/net/dsa/tag.c
@@ -13,8 +13,8 @@
#include <net/dsa.h>
#include <net/dst_metadata.h>
-#include "slave.h"
#include "tag.h"
+#include "user.h"
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(dsa_tag_drivers_lock);
* switch, the DSA driver owning the interface to which the packet is
* delivered is never notified unless we do so here.
*/
-static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
+static bool dsa_skb_defer_rx_timestamp(struct dsa_user_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
@@ -57,7 +57,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
- struct dsa_slave_priv *p;
+ struct dsa_user_priv *p;
if (unlikely(!cpu_dp)) {
kfree_skb(skb);
@@ -75,7 +75,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb_has_extensions(skb))
skb->slow_gro = 0;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (likely(skb->dev)) {
dsa_default_offload_fwd_mark(skb);
nskb = skb;
@@ -94,7 +94,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
- if (unlikely(!dsa_slave_dev_check(skb->dev))) {
+ if (unlikely(!dsa_user_dev_check(skb->dev))) {
/* Packet is to be injected directly on an upper
* device, e.g. a team/bond, so skip all DSA-port
* specific actions.
@@ -105,8 +105,9 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
p = netdev_priv(skb->dev);
- if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
- nskb = dsa_untag_bridge_pvid(skb);
+ if (unlikely(cpu_dp->ds->untag_bridge_pvid ||
+ cpu_dp->ds->untag_vlan_aware_bridge_pvid)) {
+ nskb = dsa_software_vlan_untag(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
@@ -114,7 +115,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
skb = nskb;
}
- dev_sw_netstats_rx_add(skb->dev, skb->len);
+ dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN);
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index 7cfbca824f1c..cf52283fe9df 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -9,7 +9,7 @@
#include <net/dsa.h>
#include "port.h"
-#include "slave.h"
+#include "user.h"
struct dsa_tag_driver {
const struct dsa_device_ops *ops;
@@ -29,7 +29,7 @@ static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
return ops->needed_headroom + ops->needed_tailroom;
}
-static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
int device, int port)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -39,51 +39,86 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds->index == device && dp->index == port &&
dp->type == DSA_PORT_TYPE_USER)
- return dp->slave;
+ return dp->user;
return NULL;
}
-/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
- * frames as untagged, since the bridge will not untag them.
+/**
+ * dsa_software_untag_vlan_aware_bridge: Software untagging for VLAN-aware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge can process tagged packets. Software like STP/PTP may not. The
+ * bridge can also process untagged packets, to the same effect as if they were
+ * tagged with the PVID of the ingress port. So packets tagged with the PVID of
+ * the bridge port must be software-untagged, to support both use cases.
*/
-static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+static inline void dsa_software_untag_vlan_aware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
{
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct net_device *dev = skb->dev;
- struct net_device *upper_dev;
- u16 vid, pvid, proto;
+ u16 pvid, proto;
int err;
- if (!br || br_vlan_enabled(br))
- return skb;
-
err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- /* Move VLAN tag from data to hwaccel */
- if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
- skb = skb_vlan_untag(skb);
- if (!skb)
- return NULL;
- }
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
- if (!skb_vlan_tag_present(skb))
- return skb;
+ if (vid == pvid && skb->vlan_proto == htons(proto))
+ __vlan_hwaccel_clear_tag(skb);
+}
- vid = skb_vlan_tag_get_id(skb);
+/**
+ * dsa_software_untag_vlan_unaware_bridge: Software untagging for VLAN-unaware bridge
+ * @skb: Pointer to received socket buffer (packet)
+ * @br: Pointer to bridge upper interface of ingress port
+ * @vid: Parsed VID from packet
+ *
+ * The bridge ignores all VLAN tags. Software like STP/PTP may not (it may run
+ * on the plain port, or on a VLAN upper interface). Maybe packets are coming
+ * to software as tagged with a driver-defined VID which is NOT equal to the
+ * PVID of the bridge port (since the bridge is VLAN-unaware, its configuration
+ * should NOT be committed to hardware). DSA needs a method for this private
+ * VID to be communicated by software to it, and if packets are tagged with it,
+ * software-untag them. Note: the private VID may be different per bridge, to
+ * support the FDB isolation use case.
+ *
+ * FIXME: this is currently implemented based on the broken assumption that
+ * the "private VID" used by the driver in VLAN-unaware mode is equal to the
+ * bridge PVID. It should not be, except for a coincidence; the bridge PVID is
+ * irrelevant to the data path in the VLAN-unaware mode. Thus, the VID that
+ * this function removes is wrong.
+ *
+ * All users of ds->untag_bridge_pvid should fix their drivers, if necessary,
+ * to make the two independent. Only then, if there still remains a need to
+ * strip the private VID from packets, then a new ds->ops->get_private_vid()
+ * API shall be introduced to communicate to DSA what this VID is, which needs
+ * to be stripped here.
+ */
+static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
+ struct net_device *br,
+ u16 vid)
+{
+ struct net_device *upper_dev;
+ u16 pvid, proto;
+ int err;
- /* We already run under an RCU read-side critical section since
- * we are called from netif_receive_skb_list_internal().
- */
- err = br_vlan_get_pvid_rcu(dev, &pvid);
+ err = br_vlan_get_proto(br, &proto);
if (err)
- return skb;
+ return;
- if (vid != pvid)
- return skb;
+ err = br_vlan_get_pvid_rcu(skb->dev, &pvid);
+ if (err)
+ return;
+
+ if (vid != pvid || skb->vlan_proto != htons(proto))
+ return;
/* The sad part about attempting to untag from DSA is that we
* don't know, unless we check, if the skb will end up in
@@ -95,10 +130,56 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
* definitely keep the tag, to make sure it keeps working.
*/
upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
- if (upper_dev)
+ if (!upper_dev)
+ __vlan_hwaccel_clear_tag(skb);
+}
+
+/**
+ * dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Receive path method for switches which send some packets as VLAN-tagged
+ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
+ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
+ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
+ *
+ * As a side effect of this method, any VLAN tag from the skb head is moved
+ * to hwaccel.
+ */
+static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+ u16 vid, proto;
+ int err;
+
+ /* software untagging for standalone ports not yet necessary */
+ if (!br)
return skb;
- __vlan_hwaccel_clear_tag(skb);
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
+ return skb;
+
+ vid = skb_vlan_tag_get_id(skb);
+
+ if (br_vlan_enabled(br)) {
+ if (dp->ds->untag_vlan_aware_bridge_pvid)
+ dsa_software_untag_vlan_aware_bridge(skb, br, vid);
+ } else {
+ if (dp->ds->untag_bridge_pvid)
+ dsa_software_untag_vlan_unaware_bridge(skb, br, vid);
+ }
return skb;
}
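
Both untagging paths are opt-in per switch. A driver whose hardware delivers bridge-port traffic to the CPU with the VLAN tag still present would set the corresponding flag from its .setup() callback; a minimal sketch ("foo_setup" is a placeholder):

static int foo_setup(struct dsa_switch *ds)
{
	/* Packets from VLAN-aware bridge ports reach the CPU still tagged
	 * with the bridge PVID; ask the core to strip it in software.
	 * (ds->untag_bridge_pvid is the legacy flag for the VLAN-unaware
	 * case, with the caveats spelled out in the FIXME above.)
	 */
	ds->untag_vlan_aware_bridge_pvid = true;

	return 0;
}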
@@ -107,12 +188,12 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
* to support termination through the bridge.
*/
static inline struct net_device *
-dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
+dsa_find_designated_bridge_port_by_vid(struct net_device *conduit, u16 vid)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct bridge_vlan_info vinfo;
- struct net_device *slave;
+ struct net_device *user;
struct dsa_port *dp;
int err;
@@ -134,13 +215,13 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
if (dp->cpu_dp != cpu_dp)
continue;
- slave = dp->slave;
+ user = dp->user;
- err = br_vlan_get_info_rcu(slave, vid, &vinfo);
+ err = br_vlan_get_info_rcu(user, vid, &vinfo);
if (err)
continue;
- return slave;
+ return user;
}
return NULL;
@@ -155,7 +236,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
*/
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
skb->offload_fwd_mark = !!(dp->bridge);
}
@@ -215,9 +296,9 @@ static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
-/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
+/* On RX, eth_type_trans() on the DSA conduit pulls ETH_HLEN bytes starting from
* skb_mac_header(skb), which leaves skb->data pointing at the first byte after
- * what the DSA master perceives as the EtherType (the beginning of the L3
+ * what the DSA conduit perceives as the EtherType (the beginning of the L3
* protocol). Since DSA EtherType header taggers treat the EtherType as part of
* the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
* is located 2 bytes behind skb->data. Note that EtherType in this context
@@ -229,7 +310,7 @@ static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
return skb->data - 2;
}
-/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
+/* On TX, skb->data points to the MAC header, which means that EtherType
* header taggers start exactly where the EtherType is (the EtherType is
* treated as part of the DSA header).
*/
@@ -238,6 +319,24 @@ static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
return skb->data + 2 * ETH_ALEN;
}
+static inline unsigned long dsa_xmit_port_mask(const struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ unsigned long mask = BIT(dp->index);
+
+ if (IS_ENABLED(CONFIG_HSR) &&
+ unlikely(dev->features & NETIF_F_HW_HSR_DUP)) {
+ struct net_device *hsr_dev = dp->hsr_dev;
+ struct dsa_port *other_dp;
+
+ dsa_hsr_foreach_port(other_dp, dp->ds, hsr_dev)
+ mask |= BIT(other_dp->index);
+ }
+
+ return mask;
+}
+
/* Create 2 modaliases per tagging protocol, one to auto-load the module
* given the ID reported by get_tag_protocol(), and the other by name.
*/
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 5ee9ef00954e..53e03fd8071b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -73,7 +73,7 @@ struct dsa_tag_8021q_vlan {
struct dsa_8021q_context {
struct dsa_switch *ds;
struct list_head vlans;
- /* EtherType of RX VID, used for filtering on master interface */
+ /* EtherType of RX VID, used for filtering on conduit interface */
__be16 proto;
};
@@ -197,7 +197,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
if (err) {
- refcount_inc(&v->refcount);
+ refcount_set(&v->refcount, 1);
return err;
}
@@ -286,7 +286,8 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
* be used for VLAN-unaware bridging.
*/
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port);
u16 standalone_vid, bridge_vid;
@@ -304,6 +305,8 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false);
+ *tx_fwd_offload = true;
+
return 0;
}
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join);
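
With the widened prototype, a tag_8021q-based driver can forward its .port_bridge_join() arguments straight through and have TX forwarding offload reported on its behalf; roughly ("foo" is a placeholder prefix):

static int foo_port_bridge_join(struct dsa_switch *ds, int port,
				struct dsa_bridge bridge,
				bool *tx_fwd_offload,
				struct netlink_ext_ack *extack)
{
	return dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
					 extack);
}

static void foo_port_bridge_leave(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge)
{
	dsa_tag_8021q_bridge_leave(ds, port, bridge);
}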
@@ -338,7 +341,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
u16 vid = dsa_tag_8021q_standalone_vid(dp);
- struct net_device *master;
+ struct net_device *conduit;
int err;
/* The CPU port is implicitly configured by
@@ -347,7 +350,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return 0;
- master = dsa_port_to_master(dp);
+ conduit = dsa_port_to_conduit(dp);
err = dsa_port_tag_8021q_vlan_add(dp, vid, false);
if (err) {
@@ -357,8 +360,8 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
return err;
}
- /* Add the VLAN to the master's RX filter. */
- vlan_vid_add(master, ctx->proto, vid);
+ /* Add the VLAN to the conduit's RX filter. */
+ vlan_vid_add(conduit, ctx->proto, vid);
return err;
}
@@ -368,7 +371,7 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
u16 vid = dsa_tag_8021q_standalone_vid(dp);
- struct net_device *master;
+ struct net_device *conduit;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
@@ -376,11 +379,11 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return;
- master = dsa_port_to_master(dp);
+ conduit = dsa_port_to_conduit(dp);
dsa_port_tag_8021q_vlan_del(dp, vid, false);
- vlan_vid_del(master, ctx->proto, vid);
+ vlan_vid_del(conduit, ctx->proto, vid);
}
static int dsa_tag_8021q_setup(struct dsa_switch *ds)
@@ -461,17 +464,17 @@ EXPORT_SYMBOL_GPL(dsa_tag_8021q_unregister);
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci)
{
- /* skb->data points at skb_mac_header, which
- * is fine for vlan_insert_tag.
+ /* skb->data points at the MAC header, which is fine
+ * for vlan_insert_tag().
*/
return vlan_insert_tag(skb, htons(tpid), tci);
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
- int vbid)
+static struct net_device *
+dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
@@ -490,35 +493,96 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
continue;
if (dsa_port_bridge_num_get(dp) == vbid)
- return dp->slave;
+ return dp->user;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid)
+{
+ /* Always prefer precise source port information, if available */
+ if (source_port != -1 && switch_id != -1)
+ return dsa_conduit_find_user(conduit, switch_id, source_port);
+ else if (vbid >= 1)
+ return dsa_tag_8021q_find_port_by_vbid(conduit, vbid);
+
+ return dsa_find_designated_bridge_port_by_vid(conduit, vid);
+}
+EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_user);
+
+/**
+ * dsa_8021q_rcv - Decode source information from tag_8021q header
+ * @skb: RX socket buffer
+ * @source_port: pointer to storage for precise source port information.
+ * If this is known already from outside tag_8021q, the pre-initialized
+ * value is preserved. If not known, pass -1.
+ * @switch_id: similar to source_port.
+ * @vbid: pointer to storage for imprecise bridge ID. Must be pre-initialized
+ * with -1. If a positive value is returned, the source_port and switch_id
+ * are invalid.
+ * @vid: pointer to storage for original VID, in case tag_8021q decoding failed.
+ *
+ * If the packet has a tag_8021q header, decode it and set @source_port,
+ * @switch_id and @vbid, and strip the header. Otherwise set @vid and keep the
+ * header in the hwaccel area of the packet.
+ */
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid)
+ int *vbid, int *vid)
{
- u16 vid, tci;
+ int tmp_source_port, tmp_switch_id, tmp_vbid;
+ __be16 vlan_proto;
+ u16 tmp_vid, tci;
if (skb_vlan_tag_present(skb)) {
+ vlan_proto = skb->vlan_proto;
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
+ struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+
+ vlan_proto = hdr->h_vlan_proto;
skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
skb_pull_rcsum(skb, ETH_HLEN);
}
- vid = tci & VLAN_VID_MASK;
+ tmp_vid = tci & VLAN_VID_MASK;
+ if (!vid_is_dsa_8021q(tmp_vid)) {
+ /* Not a tag_8021q frame, so return the VID to the
+ * caller for further processing, and put the tag back
+ */
+ if (vid)
+ *vid = tmp_vid;
+
+ __vlan_hwaccel_put_tag(skb, vlan_proto, tci);
+
+ return;
+ }
- *source_port = dsa_8021q_rx_source_port(vid);
- *switch_id = dsa_8021q_rx_switch_id(vid);
+ tmp_source_port = dsa_8021q_rx_source_port(tmp_vid);
+ tmp_switch_id = dsa_8021q_rx_switch_id(tmp_vid);
+ tmp_vbid = dsa_tag_8021q_rx_vbid(tmp_vid);
+
+ /* Precise source port information is unknown when receiving from a
+ * VLAN-unaware bridging domain, and tmp_source_port and tmp_switch_id
+ * are zeroes in this case.
+ *
+ * Preserve the source information from hardware-specific mechanisms,
+ * if available. This allows us to not overwrite a valid source port
+ * and switch ID with less precise values.
+ */
+ if (tmp_vbid == 0 && *source_port == -1)
+ *source_port = tmp_source_port;
+ if (tmp_vbid == 0 && *switch_id == -1)
+ *switch_id = tmp_switch_id;
if (vbid)
- *vbid = dsa_tag_8021q_rx_vbid(vid);
+ *vbid = tmp_vbid;
skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ return;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h
index b75cbaa028ef..27b8906f99ec 100644
--- a/net/dsa/tag_8021q.h
+++ b/net/dsa/tag_8021q.h
@@ -14,10 +14,11 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
- int *vbid);
+ int *vbid, int *vid);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
- int vbid);
+struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit,
+ int source_port, int switch_id,
+ int vid, int vbid);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_tag_8021q_vlan_info *info);
diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c
index 7f3b7d730b85..cbb588ca73aa 100644
--- a/net/dsa/tag_ar9331.c
+++ b/net/dsa/tag_ar9331.c
@@ -29,7 +29,7 @@
static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__le16 *phdr;
u16 hdr;
@@ -74,7 +74,7 @@ static struct sk_buff *ar9331_tag_rcv(struct sk_buff *skb,
/* Get source port information */
port = FIELD_GET(AR9331_HDR_PORT_NUM_MASK, hdr);
- skb->dev = dsa_master_find_slave(ndev, 0, port);
+ skb->dev = dsa_conduit_find_user(ndev, 0, port);
if (!skb->dev)
return NULL;
@@ -89,6 +89,7 @@ static const struct dsa_device_ops ar9331_netdev_ops = {
.needed_headroom = AR9331_HDR_LEN,
};
+MODULE_DESCRIPTION("DSA tag driver for Atheros AR9331 SoC with built-in switch");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_AR9331, AR9331_NAME);
module_dsa_tag_driver(ar9331_netdev_ops);
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 10239daa5745..cf9420439054 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -7,6 +7,7 @@
#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -14,6 +15,7 @@
#define BRCM_NAME "brcm"
#define BRCM_LEGACY_NAME "brcm-legacy"
+#define BRCM_LEGACY_FCS_NAME "brcm-legacy-fcs"
#define BRCM_PREPEND_NAME "brcm-prepend"
/* Legacy Broadcom tag (6 bytes) */
@@ -31,6 +33,10 @@
#define BRCM_LEG_MULTICAST (1 << 5)
#define BRCM_LEG_EGRESS (2 << 5)
#define BRCM_LEG_INGRESS (3 << 5)
+#define BRCM_LEG_LEN_HI(x) (((x) >> 8) & 0x7)
+
+/* 4th byte in the tag */
+#define BRCM_LEG_LEN_LO(x) ((x) & 0xff)
/* 6th byte in the tag */
#define BRCM_LEG_PORT_ID (0xf)
@@ -84,8 +90,9 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
struct net_device *dev,
unsigned int offset)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u16 queue = skb_get_queue_mapping(skb);
+ u16 port_mask;
u8 *brcm_tag;
/* The Ethernet switch we are interfaced with needs packets to be at
@@ -95,7 +102,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*
- * Let dsa_slave_xmit() free the SKB
+ * Let dsa_user_xmit() free the SKB
*/
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
return NULL;
@@ -113,12 +120,11 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) |
((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);
brcm_tag[1] = 0;
- brcm_tag[2] = 0;
- if (dp->index == 8)
- brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
- brcm_tag[3] = (1 << dp->index) & BRCM_IG_DSTMAP1_MASK;
+ port_mask = dsa_xmit_port_mask(skb, dev);
+ brcm_tag[2] = (port_mask >> 8) & BRCM_IG_DSTMAP2_MASK;
+ brcm_tag[3] = port_mask & BRCM_IG_DSTMAP1_MASK;
- /* Now tell the master network device about the desired output queue
+ /* Now tell the conduit network device about the desired output queue
* as well
*/
skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));
@@ -163,14 +169,15 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
/* Locate which port this is coming from */
source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_TAG_LEN);
- dsa_default_offload_fwd_mark(skb);
+ if (likely(!is_link_local_ether_addr(eth_hdr(skb)->h_dest)))
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
@@ -211,11 +218,53 @@ DSA_TAG_DRIVER(brcm_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) || \
+ IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int len = BRCM_LEG_TAG_LEN;
+ int source_port;
+ __be16 *proto;
+ u8 *brcm_tag;
+
+ if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ return NULL;
+
+ brcm_tag = dsa_etype_header_pos_rx(skb);
+ proto = (__be16 *)(brcm_tag + BRCM_LEG_TAG_LEN);
+
+ source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /* The internal switch in BCM63XX SoCs always tags on egress on the CPU
+ * port. We use VID 0 internally for untagged traffic, so strip the tag
+ * if the TCI field is all 0, and keep it otherwise to also retain
+ * e.g. 802.1p tagged packets.
+ */
+ if (proto[0] == htons(ETH_P_8021Q) && proto[1] == 0)
+ len += VLAN_HLEN;
+
+ /* Remove Broadcom tag and update checksum */
+ skb_pull_rcsum(skb, len);
+
+ if (likely(!is_link_local_ether_addr(eth_hdr(skb)->h_dest)))
+ dsa_default_offload_fwd_mark(skb);
+
+ dsa_strip_etype_header(skb, len);
+
+ return skb;
+}
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY || CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
+
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 *brcm_tag;
/* The Ethernet switch we are interfaced with needs packets to be at
@@ -225,7 +274,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*
- * Let dsa_slave_xmit() free the SKB
+ * Let dsa_user_xmit() free the SKB
*/
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
@@ -249,44 +298,77 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
return skb;
}
-static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
- struct net_device *dev)
+static const struct dsa_device_ops brcm_legacy_netdev_ops = {
+ .name = BRCM_LEGACY_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY,
+ .xmit = brcm_leg_tag_xmit,
+ .rcv = brcm_leg_tag_rcv,
+ .needed_headroom = BRCM_LEG_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_fcs_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- int source_port;
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ unsigned int fcs_len;
+ __le32 fcs_val;
u8 *brcm_tag;
- if (unlikely(!pskb_may_pull(skb, BRCM_LEG_PORT_ID)))
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 70 bytes (including FCS
+ * and tag) because the length verification is done after the Broadcom
+ * tag is stripped off the ingress packet.
+ *
+ * Let dsa_user_xmit() free the SKB.
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
- brcm_tag = dsa_etype_header_pos_rx(skb);
+ fcs_len = skb->len;
+ fcs_val = cpu_to_le32(crc32_le(~0, skb->data, fcs_len) ^ ~0);
- source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+ skb_push(skb, BRCM_LEG_TAG_LEN);
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
- if (!skb->dev)
- return NULL;
+ dsa_alloc_etype_header(skb, BRCM_LEG_TAG_LEN);
- /* Remove Broadcom tag and update checksum */
- skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
+ brcm_tag = skb->data + 2 * ETH_ALEN;
- dsa_default_offload_fwd_mark(skb);
+ /* Broadcom tag type */
+ brcm_tag[0] = BRCM_LEG_TYPE_HI;
+ brcm_tag[1] = BRCM_LEG_TYPE_LO;
- dsa_strip_etype_header(skb, BRCM_LEG_TAG_LEN);
+ /* Broadcom tag value */
+ brcm_tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(fcs_len);
+ brcm_tag[3] = BRCM_LEG_LEN_LO(fcs_len);
+ brcm_tag[4] = 0;
+ brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID;
+
+ /* Original FCS value */
+ if (__skb_pad(skb, ETH_FCS_LEN, false))
+ return NULL;
+ skb_put_data(skb, &fcs_val, ETH_FCS_LEN);
return skb;
}
-static const struct dsa_device_ops brcm_legacy_netdev_ops = {
- .name = BRCM_LEGACY_NAME,
- .proto = DSA_TAG_PROTO_BRCM_LEGACY,
- .xmit = brcm_leg_tag_xmit,
+static const struct dsa_device_ops brcm_legacy_fcs_netdev_ops = {
+ .name = BRCM_LEGACY_FCS_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY_FCS,
+ .xmit = brcm_leg_fcs_tag_xmit,
.rcv = brcm_leg_tag_rcv,
.needed_headroom = BRCM_LEG_TAG_LEN,
};
-DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
-#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+DSA_TAG_DRIVER(brcm_legacy_fcs_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY_FCS, BRCM_LEGACY_FCS_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
@@ -322,6 +404,9 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
&DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops),
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+ &DSA_TAG_DRIVER_NAME(brcm_legacy_fcs_netdev_ops),
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
#endif
@@ -329,4 +414,5 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = {
module_dsa_tag_drivers(dsa_tag_driver_array);
+MODULE_DESCRIPTION("DSA tag driver for Broadcom switches using in-frame headers");
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 1fd7fa26db64..2a2c4fb61a65 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -129,7 +129,7 @@ enum dsa_code {
static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 extra)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct net_device *br_dev;
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
@@ -267,14 +267,14 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
skb->dev = lag ? lag->dev : NULL;
} else {
- skb->dev = dsa_master_find_slave(dev, source_device,
+ skb->dev = dsa_conduit_find_user(dev, source_device,
source_port);
}
if (!skb->dev)
return NULL;
- /* When using LAG offload, skb->dev is not a DSA slave interface,
+ /* When using LAG offload, skb->dev is not a DSA user interface,
* so we cannot call dsa_default_offload_fwd_mark and we need to
* special-case it.
*/
@@ -406,4 +406,5 @@ static struct dsa_tag_driver *dsa_tag_drivers[] = {
module_dsa_tag_drivers(dsa_tag_drivers);
+MODULE_DESCRIPTION("DSA tag driver for Marvell switches using DSA headers");
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
index e279cd9057b0..5fa436121087 100644
--- a/net/dsa/tag_gswip.c
+++ b/net/dsa/tag_gswip.c
@@ -48,8 +48,7 @@
/* Byte 3 */
#define GSWIP_TX_DPID_EN BIT(0)
-#define GSWIP_TX_PORT_MAP_SHIFT 1
-#define GSWIP_TX_PORT_MAP_MASK GENMASK(6, 1)
+#define GSWIP_TX_PORT_MAP GENMASK(6, 1)
#define GSWIP_RX_HEADER_LEN 8
@@ -61,7 +60,6 @@
static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *gswip_tag;
skb_push(skb, GSWIP_TX_HEADER_LEN);
@@ -70,7 +68,7 @@ static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
gswip_tag[0] = GSWIP_TX_SLPID_CPU;
gswip_tag[1] = GSWIP_TX_DPID_ELAN;
gswip_tag[2] = GSWIP_TX_PORT_MAP_EN | GSWIP_TX_PORT_MAP_SEL;
- gswip_tag[3] = BIT(dp->index + GSWIP_TX_PORT_MAP_SHIFT) & GSWIP_TX_PORT_MAP_MASK;
+ gswip_tag[3] = FIELD_PREP(GSWIP_TX_PORT_MAP, dsa_xmit_port_mask(skb, dev));
gswip_tag[3] |= GSWIP_TX_DPID_EN;
return skb;
@@ -89,7 +87,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
/* Get source port information */
port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
@@ -107,6 +105,7 @@ static const struct dsa_device_ops gswip_netdev_ops = {
.needed_headroom = GSWIP_RX_HEADER_LEN,
};
+MODULE_DESCRIPTION("DSA tag driver for Lantiq / Intel GSWIP switches");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_GSWIP, GSWIP_NAME);
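
Several taggers in this series (gswip, mtk, qca, rtl8_4) replace open-coded shifts with GENMASK()/FIELD_PREP()/FIELD_GET(). A small user-space sketch of what those macros compute, using simplified 32-bit stand-ins rather than <linux/bitfield.h>, with the GSWIP port-map field as the example:

#include <stdint.h>
#include <stdio.h>

/* Simplified, non-kernel equivalents of GENMASK()/FIELD_PREP()/FIELD_GET()
 * for 32-bit values, just to show what the tagger code computes.
 */
#define GENMASK32(h, l)     (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define LOW_BIT(mask)       ((mask) & -(mask))
#define FIELD_PREP32(m, v)  (((uint32_t)(v) * LOW_BIT(m)) & (m))
#define FIELD_GET32(m, v)   (((uint32_t)(v) & (m)) / LOW_BIT(m))

#define GSWIP_TX_PORT_MAP   GENMASK32(6, 1)	/* bits 6..1 of tag byte 3 */

int main(void)
{
	uint32_t port_mask = 1u << 2;	/* e.g. transmit towards port 2 */
	uint32_t byte3 = FIELD_PREP32(GSWIP_TX_PORT_MAP, port_mask);

	printf("byte3 = 0x%02x, decoded port map = 0x%02x\n",
	       byte3, FIELD_GET32(GSWIP_TX_PORT_MAP, byte3));
	return 0;
}
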
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index 03a1fb9c87a9..544ab15685a2 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -20,7 +20,6 @@
static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *tag;
/* Calculate checksums (if required) before adding the trailer tag to
@@ -33,7 +32,7 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
/* Tag encoding */
tag = skb_put(skb, HELLCREEK_TAG_LEN);
- *tag = BIT(dp->index);
+ *tag = dsa_xmit_port_mask(skb, dev);
return skb;
}
@@ -45,7 +44,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
u8 *tag = skb_tail_pointer(skb) - HELLCREEK_TAG_LEN;
unsigned int port = tag[0] & 0x03;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev) {
netdev_warn_once(dev, "Failed to get source port: %d\n", port);
return NULL;
@@ -67,6 +66,7 @@ static const struct dsa_device_ops hellcreek_netdev_ops = {
.needed_tailroom = HELLCREEK_TAG_LEN,
};
+MODULE_DESCRIPTION("DSA tag driver for Hirschmann Hellcreek TSN switches");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_HELLCREEK, HELLCREEK_NAME);
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 080e5c369f5b..9170a0148cc4 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -4,8 +4,10 @@
* Copyright (c) 2017 Microchip Technology
*/
+#include <linux/dsa/ksz_common.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
+#include <linux/ptp_classify.h>
#include <net/dsa.h>
#include "tag.h"
@@ -16,14 +18,76 @@
#define LAN937X_NAME "lan937x"
/* Typically only one byte is used for tail tag. */
+#define KSZ_PTP_TAG_LEN 4
#define KSZ_EGRESS_TAG_LEN 1
#define KSZ_INGRESS_TAG_LEN 1
+#define KSZ_HWTS_EN 0
+
+struct ksz_tagger_private {
+ struct ksz_tagger_data data; /* Must be first */
+ unsigned long state;
+ struct kthread_worker *xmit_worker;
+};
+
+static struct ksz_tagger_private *
+ksz_tagger_private(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+static void ksz_hwtstamp_set_state(struct dsa_switch *ds, bool on)
+{
+ struct ksz_tagger_private *priv = ksz_tagger_private(ds);
+
+ if (on)
+ set_bit(KSZ_HWTS_EN, &priv->state);
+ else
+ clear_bit(KSZ_HWTS_EN, &priv->state);
+}
+
+static void ksz_disconnect(struct dsa_switch *ds)
+{
+ struct ksz_tagger_private *priv = ds->tagger_data;
+
+ kthread_destroy_worker(priv->xmit_worker);
+ kfree(priv);
+ ds->tagger_data = NULL;
+}
+
+static int ksz_connect(struct dsa_switch *ds)
+{
+ struct ksz_tagger_data *tagger_data;
+ struct kthread_worker *xmit_worker;
+ struct ksz_tagger_private *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
+ ds->dst->index, ds->index);
+ if (IS_ERR(xmit_worker)) {
+ ret = PTR_ERR(xmit_worker);
+ kfree(priv);
+ return ret;
+ }
+
+ priv->xmit_worker = xmit_worker;
+ /* Export functions for switch driver use */
+ tagger_data = &priv->data;
+ tagger_data->hwtstamp_set_state = ksz_hwtstamp_set_state;
+ ds->tagger_data = priv;
+
+ return 0;
+}
+
static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
struct net_device *dev,
unsigned int port, unsigned int len)
{
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
@@ -47,27 +111,27 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
* DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
* tag0 : zero-based value represents port
- * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ * (eg, 0x0=port1, 0x2=port3, 0x3=port4)
*/
+#define KSZ8795_TAIL_TAG_EG_PORT_M GENMASK(1, 0)
#define KSZ8795_TAIL_TAG_OVERRIDE BIT(6)
#define KSZ8795_TAIL_TAG_LOOKUP BIT(7)
static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct ethhdr *hdr;
u8 *tag;
- u8 *addr;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
/* Tag encoding */
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
- addr = skb_mac_header(skb);
+ hdr = skb_eth_hdr(skb);
- *tag = 1 << dp->index;
- if (is_link_local_ether_addr(addr))
+ *tag = dsa_xmit_port_mask(skb, dev);
+ if (is_link_local_ether_addr(hdr->h_dest))
*tag |= KSZ8795_TAIL_TAG_OVERRIDE;
return skb;
@@ -75,9 +139,15 @@ static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *ksz8795_rcv(struct sk_buff *skb, struct net_device *dev)
{
- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ u8 *tag;
+
+ if (skb_linearize(skb))
+ return NULL;
+
+ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- return ksz_common_rcv(skb, dev, tag[0] & 7, KSZ_EGRESS_TAG_LEN);
+ return ksz_common_rcv(skb, dev, tag[0] & KSZ8795_TAIL_TAG_EG_PORT_M,
+ KSZ_EGRESS_TAG_LEN);
}
static const struct dsa_device_ops ksz8795_netdev_ops = {
@@ -92,63 +162,166 @@ DSA_TAG_DRIVER(ksz8795_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
/*
- * For Ingress (Host -> KSZ9477), 2 bytes are added before FCS.
+ * For Ingress (Host -> KSZ9477), 2/6 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|tag1(1byte)|
+ * FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (Present only if PTP is enabled in hardware)
* tag0 : Prioritization (not used now)
* tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
*
- * For Egress (KSZ9477 -> Host), 1 byte is added before FCS.
+ * For Egress (KSZ9477 -> Host), 1/5 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (Present only if bit 7 of tag0 is set)
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x06=port7)
*/
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
-#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
+#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
+static void ksz_rcv_timestamp(struct sk_buff *skb, u8 *tag)
+{
+ u8 *tstamp_raw = tag - KSZ_PTP_TAG_LEN;
+ ktime_t tstamp;
+
+ tstamp = ksz_decode_tstamp(get_unaligned_be32(tstamp_raw));
+ KSZ_SKB_CB(skb)->tstamp = tstamp;
+}
+
+/* The timestamp tag *needs* to be inserted if PTP is enabled in hardware,
+ * regardless of whether it is a PTP frame or not.
+ */
+static void ksz_xmit_timestamp(struct dsa_port *dp, struct sk_buff *skb)
+{
+ struct ksz_tagger_private *priv;
+ struct ptp_header *ptp_hdr;
+ unsigned int ptp_type;
+ u32 tstamp_raw = 0;
+ s64 correction;
+
+ priv = ksz_tagger_private(dp->ds);
+
+ if (!test_bit(KSZ_HWTS_EN, &priv->state))
+ return;
+
+ if (!KSZ_SKB_CB(skb)->update_correction)
+ goto output_tag;
+
+ ptp_type = KSZ_SKB_CB(skb)->ptp_type;
+
+ ptp_hdr = ptp_parse_header(skb, ptp_type);
+ if (!ptp_hdr)
+ goto output_tag;
+
+ correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
+
+ if (correction < 0) {
+ struct timespec64 ts;
+
+ ts = ns_to_timespec64(-correction >> 16);
+ tstamp_raw = ((ts.tv_sec & 3) << 30) | ts.tv_nsec;
+
+ /* Set correction field to 0 and update UDP checksum */
+ ptp_header_update_correction(skb, ptp_type, ptp_hdr, 0);
+ }
+
+output_tag:
+ put_unaligned_be32(tstamp_raw, skb_put(skb, KSZ_PTP_TAG_LEN));
+}
+
+/* Defer transmit if waiting for egress time stamp is required. */
+static struct sk_buff *ksz_defer_xmit(struct dsa_port *dp, struct sk_buff *skb)
+{
+ struct ksz_tagger_data *tagger_data = ksz_tagger_data(dp->ds);
+ struct ksz_tagger_private *priv = ksz_tagger_private(dp->ds);
+ void (*xmit_work_fn)(struct kthread_work *work);
+ struct sk_buff *clone = KSZ_SKB_CB(skb)->clone;
+ struct ksz_deferred_xmit_work *xmit_work;
+ struct kthread_worker *xmit_worker;
+
+ if (!clone)
+ return skb; /* no deferred xmit for this packet */
+
+ xmit_work_fn = tagger_data->xmit_work_fn;
+ xmit_worker = priv->xmit_worker;
+
+ if (!xmit_work_fn || !xmit_worker)
+ return NULL;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+
+ kthread_init_work(&xmit_work->work, xmit_work_fn);
+ /* Increase refcount so the kfree_skb in dsa_user_xmit
+ * won't really free the packet.
+ */
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(xmit_worker, &xmit_work->work);
+
+ return NULL;
+}
+
static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 prio = netdev_txq_to_tc(dev, queue_mapping);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct ethhdr *hdr;
__be16 *tag;
- u8 *addr;
u16 val;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
/* Tag encoding */
+ ksz_xmit_timestamp(dp, skb);
+
tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN);
- addr = skb_mac_header(skb);
+ hdr = skb_eth_hdr(skb);
- val = BIT(dp->index);
+ val = dsa_xmit_port_mask(skb, dev);
+ val |= FIELD_PREP(KSZ9477_TAIL_TAG_PRIO, prio);
- if (is_link_local_ether_addr(addr))
+ if (is_link_local_ether_addr(hdr->h_dest))
val |= KSZ9477_TAIL_TAG_OVERRIDE;
*tag = cpu_to_be16(val);
- return skb;
+ return ksz_defer_xmit(dp, skb);
}
static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
- /* Tag decoding */
- u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- unsigned int port = tag[0] & 7;
unsigned int len = KSZ_EGRESS_TAG_LEN;
+ unsigned int port;
+ u8 *tag;
+
+ if (skb_linearize(skb))
+ return NULL;
+
+ /* Tag decoding */
+ tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
/* Extra 4-bytes PTP timestamp */
- if (tag[0] & KSZ9477_PTP_TAG_INDICATION)
- len += KSZ9477_PTP_TAG_LEN;
+ if (tag[0] & KSZ9477_PTP_TAG_INDICATION) {
+ ksz_rcv_timestamp(skb, tag);
+ len += KSZ_PTP_TAG_LEN;
+ }
return ksz_common_rcv(skb, dev, port, len);
}
@@ -158,35 +331,43 @@ static const struct dsa_device_ops ksz9477_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9477,
.xmit = ksz9477_xmit,
.rcv = ksz9477_rcv,
- .needed_tailroom = KSZ9477_INGRESS_TAG_LEN,
+ .connect = ksz_connect,
+ .disconnect = ksz_disconnect,
+ .needed_tailroom = KSZ9477_INGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9477_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9477, KSZ9477_NAME);
+#define KSZ9893_TAIL_TAG_PRIO GENMASK(4, 3)
#define KSZ9893_TAIL_TAG_OVERRIDE BIT(5)
#define KSZ9893_TAIL_TAG_LOOKUP BIT(6)
static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
- u8 *addr;
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 prio = netdev_txq_to_tc(dev, queue_mapping);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct ethhdr *hdr;
u8 *tag;
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
/* Tag encoding */
+ ksz_xmit_timestamp(dp, skb);
+
tag = skb_put(skb, KSZ_INGRESS_TAG_LEN);
- addr = skb_mac_header(skb);
+ hdr = skb_eth_hdr(skb);
- *tag = BIT(dp->index);
+ *tag = dsa_xmit_port_mask(skb, dev);
+ *tag |= FIELD_PREP(KSZ9893_TAIL_TAG_PRIO, prio);
- if (is_link_local_ether_addr(addr))
+ if (is_link_local_ether_addr(hdr->h_dest))
*tag |= KSZ9893_TAIL_TAG_OVERRIDE;
- return skb;
+ return ksz_defer_xmit(dp, skb);
}
static const struct dsa_device_ops ksz9893_netdev_ops = {
@@ -194,23 +375,28 @@ static const struct dsa_device_ops ksz9893_netdev_ops = {
.proto = DSA_TAG_PROTO_KSZ9893,
.xmit = ksz9893_xmit,
.rcv = ksz9477_rcv,
- .needed_tailroom = KSZ_INGRESS_TAG_LEN,
+ .connect = ksz_connect,
+ .disconnect = ksz_disconnect,
+ .needed_tailroom = KSZ_INGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(ksz9893_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893, KSZ9893_NAME);
-/* For xmit, 2 bytes are added before FCS.
+/* For xmit, 2/6 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|tag1(1byte)|
+ * FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (Present only if PTP is enabled in hardware)
* tag0 : represents tag override, lookup and valid
* tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x80=port8)
*
- * For rcv, 1 byte is added before FCS.
+ * For rcv, 1/5 bytes are added before FCS.
* ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|ts(4bytes)|tag0(1byte)|FCS(4bytes)
* ---------------------------------------------------------------------------
+ * ts : time stamp (Present only if bit 7 of tag0 is set)
* tag0 : zero-based value represents port
* (eg, 0x00=port1, 0x02=port3, 0x07=port8)
*/
@@ -219,12 +405,15 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ9893, KSZ9893_NAME);
#define LAN937X_TAIL_TAG_BLOCKING_OVERRIDE BIT(11)
#define LAN937X_TAIL_TAG_LOOKUP BIT(12)
#define LAN937X_TAIL_TAG_VALID BIT(13)
+#define LAN937X_TAIL_TAG_PRIO GENMASK(10, 8)
#define LAN937X_TAIL_TAG_PORT_MASK 7
static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u8 prio = netdev_txq_to_tc(dev, queue_mapping);
+ struct dsa_port *dp = dsa_user_to_port(dev);
const struct ethhdr *hdr = eth_hdr(skb);
__be16 *tag;
u16 val;
@@ -232,9 +421,12 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
+ ksz_xmit_timestamp(dp, skb);
+
tag = skb_put(skb, LAN937X_EGRESS_TAG_LEN);
- val = BIT(dp->index);
+ val = dsa_xmit_port_mask(skb, dev);
+ val |= FIELD_PREP(LAN937X_TAIL_TAG_PRIO, prio);
if (is_link_local_ether_addr(hdr->h_dest))
val |= LAN937X_TAIL_TAG_BLOCKING_OVERRIDE;
@@ -244,7 +436,7 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
put_unaligned_be16(val, tag);
- return skb;
+ return ksz_defer_xmit(dp, skb);
}
static const struct dsa_device_ops lan937x_netdev_ops = {
@@ -252,7 +444,9 @@ static const struct dsa_device_ops lan937x_netdev_ops = {
.proto = DSA_TAG_PROTO_LAN937X,
.xmit = lan937x_xmit,
.rcv = ksz9477_rcv,
- .needed_tailroom = LAN937X_EGRESS_TAG_LEN,
+ .connect = ksz_connect,
+ .disconnect = ksz_disconnect,
+ .needed_tailroom = LAN937X_EGRESS_TAG_LEN + KSZ_PTP_TAG_LEN,
};
DSA_TAG_DRIVER(lan937x_netdev_ops);
@@ -267,4 +461,5 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = {
module_dsa_tag_drivers(dsa_tag_driver_array);
+MODULE_DESCRIPTION("DSA tag driver for Microchip 8795/937x/9477/9893 families of switches");
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index c25f5536706b..258e5d7dc5ef 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -56,7 +56,7 @@ static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr)
static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__be16 *lan9303_tag;
u16 tag;
@@ -99,7 +99,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
source_port = lan9303_tag1 & 0x3;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev) {
dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
return NULL;
@@ -119,6 +119,7 @@ static const struct dsa_device_ops lan9303_netdev_ops = {
.needed_headroom = LAN9303_TAG_LEN,
};
+MODULE_DESCRIPTION("DSA tag driver for SMSC/Microchip LAN9303 family of switches");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_LAN9303, LAN9303_NAME);
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 40af80452747..dea3eecaf093 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -23,7 +23,7 @@
static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 xmit_tpid;
u8 *mtk_tag;
@@ -54,7 +54,8 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
* whether that's a combined special tag with 802.1Q header.
*/
mtk_tag[0] = xmit_tpid;
- mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
+ mtk_tag[1] = FIELD_PREP(MTK_HDR_XMIT_DP_BIT_MASK,
+ dsa_xmit_port_mask(skb, dev));
/* Tag control information is kept for 802.1Q */
if (xmit_tpid == MTK_HDR_XMIT_UNTAGGED) {
@@ -85,7 +86,7 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev)
/* Get source port information */
port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
@@ -102,6 +103,7 @@ static const struct dsa_device_ops mtk_netdev_ops = {
.needed_headroom = MTK_HDR_LEN,
};
+MODULE_DESCRIPTION("DSA tag driver for Mediatek switches");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_MTK, MTK_NAME);
diff --git a/net/dsa/tag_mxl-gsw1xx.c b/net/dsa/tag_mxl-gsw1xx.c
new file mode 100644
index 000000000000..60f7c445e656
--- /dev/null
+++ b/net/dsa/tag_mxl-gsw1xx.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DSA driver Special Tag support for MaxLinear GSW1xx switch chips
+ *
+ * Copyright (C) 2025 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2023 - 2024 MaxLinear Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+#include "tag.h"
+
+/* To define the outgoing port and to discover the incoming port, a special
+ * tag is used by the GSW1xx.
+ *
+ * Dest MAC Src MAC special TAG EtherType
+ * ...| 1 2 3 4 5 6 | 1 2 3 4 5 6 | 1 2 3 4 5 6 7 8 | 1 2 |...
+ * |<--------------->|
+ */
+
+#define GSW1XX_TAG_NAME "gsw1xx"
+
+/* special tag header length (RX and TX) */
+#define GSW1XX_HEADER_LEN 8
+
+/* Word 0 = Ethertype -> 0x88C3 */
+
+/* Word 1 */
+#define GSW1XX_TX_PORT_MAP GENMASK(7, 0)
+#define GSW1XX_TX_PORT_MAP_EN BIT(15)
+#define GSW1XX_TX_CLASS_EN BIT(14)
+#define GSW1XX_TX_TIME_STAMP_EN BIT(13)
+#define GSW1XX_TX_LRN_DIS BIT(12)
+#define GSW1XX_TX_CLASS GENMASK(11, 8)
+
+/* special tag in RX path header */
+/* Word 2 */
+#define GSW1XX_RX_PORT_MAP GENMASK(15, 8)
+
+static struct sk_buff *gsw1xx_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ __be16 *gsw1xx_tag;
+ u16 tag;
+
+ /* provide additional space of 'GSW1XX_HEADER_LEN' bytes */
+ skb_push(skb, GSW1XX_HEADER_LEN);
+
+ /* add space between MAC address and Ethertype */
+ dsa_alloc_etype_header(skb, GSW1XX_HEADER_LEN);
+
+ /* special tag ingress */
+ gsw1xx_tag = dsa_etype_header_pos_tx(skb);
+ gsw1xx_tag[0] = htons(ETH_P_MXLGSW);
+
+ tag = FIELD_PREP(GSW1XX_TX_PORT_MAP, dsa_xmit_port_mask(skb, dev)) |
+ GSW1XX_TX_PORT_MAP_EN | GSW1XX_TX_LRN_DIS;
+ gsw1xx_tag[1] = htons(tag);
+ gsw1xx_tag[2] = 0;
+ gsw1xx_tag[3] = 0;
+
+ return skb;
+}
+
+static struct sk_buff *gsw1xx_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int port;
+ __be16 *gsw1xx_tag;
+
+ if (unlikely(!pskb_may_pull(skb, GSW1XX_HEADER_LEN))) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet, cannot pull SKB\n");
+ return NULL;
+ }
+
+ gsw1xx_tag = dsa_etype_header_pos_rx(skb);
+
+ if (unlikely(ntohs(gsw1xx_tag[0]) != ETH_P_MXLGSW)) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid special tag\n");
+ dev_warn_ratelimited(&dev->dev, "Tag: %8ph\n", gsw1xx_tag);
+ return NULL;
+ }
+
+ /* Get source port information */
+ port = FIELD_GET(GSW1XX_RX_PORT_MAP, ntohs(gsw1xx_tag[1]));
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
+ dev_warn_ratelimited(&dev->dev, "Tag: %8ph\n", gsw1xx_tag);
+ return NULL;
+ }
+
+ /* remove the GSW1xx special tag between MAC addresses and the current
+ * ethertype field.
+ */
+ skb_pull_rcsum(skb, GSW1XX_HEADER_LEN);
+ dsa_strip_etype_header(skb, GSW1XX_HEADER_LEN);
+
+ return skb;
+}
+
+static const struct dsa_device_ops gsw1xx_netdev_ops = {
+ .name = GSW1XX_TAG_NAME,
+ .proto = DSA_TAG_PROTO_MXL_GSW1XX,
+ .xmit = gsw1xx_tag_xmit,
+ .rcv = gsw1xx_tag_rcv,
+ .needed_headroom = GSW1XX_HEADER_LEN,
+};
+
+MODULE_DESCRIPTION("DSA tag driver for MaxLinear GSW1xx 8 byte protocol");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_MXL_GSW1XX, GSW1XX_TAG_NAME);
+
+module_dsa_tag_driver(gsw1xx_netdev_ops);
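
A minimal sketch of the two GSW1xx special-tag words handled by the new tagger above: word 0 is the 0x88C3 ethertype, the TX word carries the destination port map in bits 7:0 plus enable bits, and the RX word carries the source port field in bits 15:8 (GSW1XX_RX_PORT_MAP). This is a plain C stand-in grounded only in the defines shown, not the kernel driver.

#include <stdint.h>
#include <stdio.h>

#define GSW1XX_TX_PORT_MAP_EN	(1u << 15)
#define GSW1XX_TX_LRN_DIS	(1u << 12)

static uint16_t gsw1xx_build_tx_word(uint8_t port_mask)
{
	/* FIELD_PREP(GSW1XX_TX_PORT_MAP, port_mask), bits 7:0 */
	return GSW1XX_TX_PORT_MAP_EN | GSW1XX_TX_LRN_DIS | port_mask;
}

static unsigned int gsw1xx_rx_source_port(uint16_t rx_word)
{
	/* FIELD_GET(GSW1XX_RX_PORT_MAP, rx_word), bits 15:8 */
	return (rx_word >> 8) & 0xff;
}

int main(void)
{
	printf("tx word for port 3: 0x%04x\n", gsw1xx_build_tx_word(1u << 3));
	printf("rx source port field: %u\n", gsw1xx_rx_source_port(0x0300));
	return 0;
}
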
diff --git a/net/dsa/tag_none.c b/net/dsa/tag_none.c
index d2fd179c4227..e9c9670a9c44 100644
--- a/net/dsa/tag_none.c
+++ b/net/dsa/tag_none.c
@@ -12,8 +12,8 @@
#define NONE_NAME "none"
-static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static struct sk_buff *dsa_user_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
/* Just return the original SKB */
return skb;
@@ -22,9 +22,10 @@ static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
static const struct dsa_device_ops none_ops = {
.name = NONE_NAME,
.proto = DSA_TAG_PROTO_NONE,
- .xmit = dsa_slave_notag_xmit,
+ .xmit = dsa_user_notag_xmit,
};
module_dsa_tag_driver(none_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_NONE, NONE_NAME);
+MODULE_DESCRIPTION("DSA no-op tag driver");
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 28ebecafdd24..3405def79c2d 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -8,44 +8,10 @@
#define OCELOT_NAME "ocelot"
#define SEVILLE_NAME "seville"
-/* If the port is under a VLAN-aware bridge, remove the VLAN header from the
- * payload and move it into the DSA tag, which will make the switch classify
- * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
- * which is the pvid of standalone and VLAN-unaware bridge ports.
- */
-static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
- u64 *vlan_tci, u64 *tag_type)
-{
- struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct vlan_ethhdr *hdr;
- u16 proto, tci;
-
- if (!br || !br_vlan_enabled(br)) {
- *vlan_tci = 0;
- *tag_type = IFH_TAG_TYPE_C;
- return;
- }
-
- hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
- br_vlan_get_proto(br, &proto);
-
- if (ntohs(hdr->h_vlan_proto) == proto) {
- __skb_vlan_pop(skb, &tci);
- *vlan_tci = tci;
- } else {
- rcu_read_lock();
- br_vlan_get_pvid_rcu(br, &tci);
- rcu_read_unlock();
- *vlan_tci = tci;
- }
-
- *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
-}
-
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
struct dsa_switch *ds = dp->ds;
u64 vlan_tci, tag_type;
void *injection;
@@ -53,7 +19,8 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
u32 rew_op = 0;
u64 qos_class;
- ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+ ocelot_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci,
+ &tag_type);
qos_class = netdev_get_num_tc(netdev) ?
netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
@@ -79,11 +46,10 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x8880000a), &injection);
- ocelot_ifh_set_dest(injection, BIT_ULL(dp->index));
+ ocelot_ifh_set_dest(injection, dsa_xmit_port_mask(skb, netdev));
return skb;
}
@@ -91,11 +57,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *seville_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x88800005), &injection);
- seville_ifh_set_dest(injection, BIT_ULL(dp->index));
+ seville_ifh_set_dest(injection, dsa_xmit_port_mask(skb, netdev));
return skb;
}
@@ -111,12 +76,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
u16 vlan_tpid;
u64 rew_val;
- /* Revert skb->data by the amount consumed by the DSA master,
+ /* Revert skb->data by the amount consumed by the DSA conduit,
* so it points to the beginning of the frame.
*/
skb_push(skb, ETH_HLEN);
/* We don't care about the short prefix, it is just for easy entrance
- * into the DSA master's RX filter. Discard it now by moving it into
+ * into the DSA conduit's RX filter. Discard it now by moving it into
* the headroom.
*/
skb_pull(skb, OCELOT_SHORT_PREFIX_LEN);
@@ -141,12 +106,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
ocelot_xfh_get_vlan_tci(extraction, &vlan_tci);
ocelot_xfh_get_rew_val(extraction, &rew_val);
- skb->dev = dsa_master_find_slave(netdev, 0, src_port);
+ skb->dev = dsa_conduit_find_user(netdev, 0, src_port);
if (!skb->dev)
/* The switch will reflect back some frames sent through
- * sockets opened on the bare DSA master. These will come back
+ * sockets opened on the bare DSA conduit. These will come back
* with src_port equal to the index of the CPU port, for which
- * there is no slave registered. So don't print any error
+ * there is no user registered. So don't print any error
* message here (ignore and drop those frames).
*/
return NULL;
@@ -170,7 +135,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
* equal to the pvid of the ingress port and should not be used for
* processing.
*/
- dp = dsa_slave_to_port(skb->dev);
+ dp = dsa_user_to_port(skb->dev);
vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q;
if (dsa_port_is_vlan_filtering(dp) &&
@@ -192,7 +157,7 @@ static const struct dsa_device_ops ocelot_netdev_ops = {
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
DSA_TAG_DRIVER(ocelot_netdev_ops);
@@ -204,7 +169,7 @@ static const struct dsa_device_ops seville_netdev_ops = {
.xmit = seville_xmit,
.rcv = ocelot_rcv,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
DSA_TAG_DRIVER(seville_netdev_ops);
@@ -217,4 +182,5 @@ static struct dsa_tag_driver *ocelot_tag_driver_array[] = {
module_dsa_tag_drivers(ocelot_tag_driver_array);
+MODULE_DESCRIPTION("DSA tag driver for Ocelot family of switches, using NPI port");
MODULE_LICENSE("GPL v2");
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 1f0b8c20eba5..3929584791e4 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -37,8 +37,8 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
return NULL;
/* PTP over IP packets need UDP checksumming. We may have inherited
- * NETIF_F_HW_CSUM from the DSA master, but these packets are not sent
- * through the DSA master, so calculate the checksum here.
+ * NETIF_F_HW_CSUM from the DSA conduit, but these packets are not sent
+ * through the DSA conduit, so calculate the checksum here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
@@ -49,7 +49,7 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
/* Calls felix_port_deferred_xmit in felix.c */
kthread_init_work(&xmit_work->work, xmit_work_fn);
- /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ /* Increase refcount so the kfree_skb in dsa_user_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
@@ -63,7 +63,7 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
@@ -79,11 +79,11 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int src_port, switch_id;
+ int src_port = -1, switch_id = -1;
- dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
+ dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL);
- skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
+ skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port);
if (!skb->dev)
return NULL;
@@ -110,7 +110,7 @@ static int ocelot_connect(struct dsa_switch *ds)
if (!priv)
return -ENOMEM;
- priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ priv->xmit_worker = kthread_run_worker(0, "felix_xmit");
if (IS_ERR(priv->xmit_worker)) {
err = PTR_ERR(priv->xmit_worker);
kfree(priv);
@@ -130,9 +130,10 @@ static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.connect = ocelot_connect,
.disconnect = ocelot_disconnect,
.needed_headroom = VLAN_HLEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
+MODULE_DESCRIPTION("DSA tag driver for Ocelot family of switches, using VLAN");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT_8021Q, OCELOT_8021Q_NAME);
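
The deferred-xmit comment above ("Increase refcount so the kfree_skb in dsa_user_xmit won't really free the packet") describes a reference-counting handoff that the ocelot-8021q and ksz taggers share. A toy user-space model of why the extra skb_get() is needed, using an invented struct and helpers purely for illustration:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int refcount;
};

static struct pkt *pkt_get(struct pkt *p)  { p->refcount++; return p; }

static void pkt_put(struct pkt *p)
{
	if (--p->refcount == 0) {
		printf("packet freed\n");
		free(p);
	}
}

int main(void)
{
	struct pkt *skb = calloc(1, sizeof(*skb));

	skb->refcount = 1;			/* reference held by the TX path */

	struct pkt *deferred = pkt_get(skb);	/* tagger defers: takes extra ref */

	pkt_put(skb);				/* TX path drops its ref (kfree_skb) */

	/* the worker transmits later, then releases the deferred reference */
	pkt_put(deferred);
	return 0;
}
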
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index e757c8de06f1..6d56a28c914c 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -14,7 +14,6 @@
static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 *phdr;
u16 hdr;
@@ -26,7 +25,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set the version field, and set destination port information */
hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
hdr |= QCA_HDR_XMIT_FROM_CPU;
- hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(dp->index));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, dsa_xmit_port_mask(skb, dev));
*phdr = htons(hdr);
@@ -75,17 +74,17 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev)
return NULL;
}
- /* Remove QCA tag and recalculate checksum */
- skb_pull_rcsum(skb, QCA_HDR_LEN);
- dsa_strip_etype_header(skb, QCA_HDR_LEN);
-
/* Get source port information */
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
+ /* Remove QCA tag and recalculate checksum */
+ skb_pull_rcsum(skb, QCA_HDR_LEN);
+ dsa_strip_etype_header(skb, QCA_HDR_LEN);
+
return skb;
}
@@ -116,9 +115,10 @@ static const struct dsa_device_ops qca_netdev_ops = {
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
.needed_headroom = QCA_HDR_LEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
+MODULE_DESCRIPTION("DSA tag driver for Qualcomm Atheros QCA8K switches");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_QCA, QCA_NAME);
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index c327314b95e3..3cc63eacfa03 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -23,7 +23,6 @@
#define RTL4_A_NAME "rtl4a"
#define RTL4_A_HDR_LEN 4
-#define RTL4_A_ETHERTYPE 0x8899
#define RTL4_A_PROTOCOL_SHIFT 12
/*
* 0x1 = Realtek Remote Control protocol (RRCP)
@@ -36,7 +35,7 @@
static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__be16 *p;
u8 *tag;
u16 out;
@@ -54,11 +53,11 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
/* Set Ethertype */
p = (__be16 *)tag;
- *p = htons(RTL4_A_ETHERTYPE);
+ *p = htons(ETH_P_REALTEK);
out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT);
/* The lower bits indicate the port number */
- out |= BIT(dp->index);
+ out |= dsa_xmit_port_mask(skb, dev);
p = (__be16 *)(tag + 2);
*p = htons(out);
@@ -82,7 +81,7 @@ static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
tag = dsa_etype_header_pos_rx(skb);
p = (__be16 *)tag;
etype = ntohs(*p);
- if (etype != RTL4_A_ETHERTYPE) {
+ if (etype != ETH_P_REALTEK) {
/* Not custom, just pass through */
netdev_dbg(dev, "non-realtek ethertype 0x%04x\n", etype);
return skb;
@@ -97,9 +96,9 @@ static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
}
port = protport & 0xff;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev) {
- netdev_dbg(dev, "could not find slave for port %d\n", port);
+ netdev_dbg(dev, "could not find user for port %d\n", port);
return NULL;
}
@@ -122,5 +121,6 @@ static const struct dsa_device_ops rtl4a_netdev_ops = {
};
module_dsa_tag_driver(rtl4a_netdev_ops);
+MODULE_DESCRIPTION("DSA tag driver for Realtek 4 byte protocol A tags");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL4_A, RTL4_A_NAME);
diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c
index 4f67834fd121..2464545da4d2 100644
--- a/net/dsa/tag_rtl8_4.c
+++ b/net/dsa/tag_rtl8_4.c
@@ -103,7 +103,6 @@
static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev,
void *tag)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
__be16 tag16[RTL8_4_TAG_LEN / 2];
/* Set Realtek EtherType */
@@ -116,7 +115,7 @@ static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev,
tag16[2] = htons(FIELD_PREP(RTL8_4_LEARN_DIS, 1));
/* Zero ALLOW; set RX (CPU->switch) forwarding port mask */
- tag16[3] = htons(FIELD_PREP(RTL8_4_RX, BIT(dp->index)));
+ tag16[3] = htons(FIELD_PREP(RTL8_4_RX, dsa_xmit_port_mask(skb, dev)));
memcpy(tag, tag16, RTL8_4_TAG_LEN);
}
@@ -180,10 +179,10 @@ static int rtl8_4_read_tag(struct sk_buff *skb, struct net_device *dev,
/* Parse TX (switch->CPU) */
port = FIELD_GET(RTL8_4_TX, ntohs(tag16[3]));
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev) {
dev_warn_ratelimited(&dev->dev,
- "could not find slave for port %d\n",
+ "could not find user for port %d\n",
port);
return -ENOENT;
}
@@ -258,4 +257,5 @@ static struct dsa_tag_driver *dsa_tag_drivers[] = {
};
module_dsa_tag_drivers(dsa_tag_drivers);
+MODULE_DESCRIPTION("DSA tag driver for Realtek 8 byte protocol 4 tags");
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_rzn1_a5psw.c b/net/dsa/tag_rzn1_a5psw.c
index 437a6820ac42..10994b3470f6 100644
--- a/net/dsa/tag_rzn1_a5psw.c
+++ b/net/dsa/tag_rzn1_a5psw.c
@@ -39,7 +39,6 @@ struct a5psw_tag {
static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
struct a5psw_tag *ptag;
u32 data2_val;
@@ -60,7 +59,7 @@ static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *de
ptag = dsa_etype_header_pos_tx(skb);
- data2_val = FIELD_PREP(A5PSW_CTRL_DATA_PORT, BIT(dp->index));
+ data2_val = FIELD_PREP(A5PSW_CTRL_DATA_PORT, dsa_xmit_port_mask(skb, dev));
ptag->ctrl_tag = htons(ETH_P_DSA_A5PSW);
ptag->ctrl_data = htons(A5PSW_CTRL_DATA_FORCE_FORWARD);
ptag->ctrl_data2_lo = htons(data2_val);
@@ -90,7 +89,7 @@ static struct sk_buff *a5psw_tag_rcv(struct sk_buff *skb,
port = FIELD_GET(A5PSW_CTRL_DATA_PORT, ntohs(tag->ctrl_data));
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
@@ -110,6 +109,7 @@ static const struct dsa_device_ops a5psw_netdev_ops = {
.needed_headroom = A5PSW_TAG_LEN,
};
+MODULE_DESCRIPTION("DSA tag driver for Renesas RZ/N1 A5PSW switch");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_A5PSW, A5PSW_NAME);
module_dsa_tag_driver(a5psw_netdev_ops);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 1c2ceba4771b..02adec693811 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -58,11 +58,8 @@
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
-#define SJA1105_HWTS_RX_EN 0
-
struct sja1105_tagger_private {
struct sja1105_tagger_data data; /* Must be first */
- unsigned long state;
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
*/
@@ -78,7 +75,7 @@ sja1105_tagger_private(struct dsa_switch *ds)
}
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
-static inline bool sja1105_is_link_local(const struct sk_buff *skb)
+static bool sja1105_is_link_local(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
@@ -118,13 +115,13 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
* a unified unpacking command for both device series.
*/
packing(buf, &meta->tstamp, 31, 0, 4, UNPACK, 0);
- packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
- packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 4, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
+ packing(buf + 5, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
-static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
+static bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
@@ -160,7 +157,7 @@ static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
return NULL;
kthread_init_work(&xmit_work->work, xmit_work_fn);
- /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ /* Increase refcount so the kfree_skb in dsa_user_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
@@ -213,7 +210,7 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp)
static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
unsigned int bridge_num = dsa_port_bridge_num_get(dp);
struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 tx_vid;
@@ -238,7 +235,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
/* Transform untagged control packets into pvid-tagged control packets so that
* all packets sent by this tagger are VLAN-tagged and we can configure the
- * switch to drop untagged packets coming from the DSA master.
+ * switch to drop untagged packets coming from the DSA conduit.
*/
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
struct sk_buff *skb, u8 pcp)
@@ -256,7 +253,7 @@ static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
return NULL;
}
- hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+ hdr = skb_vlan_eth_hdr(skb);
/* If skb is already VLAN-tagged, leave that VLAN ID in place */
if (hdr->h_vlan_proto == xmit_tpid)
@@ -269,7 +266,7 @@ static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
@@ -297,7 +294,7 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
@@ -386,16 +383,12 @@ static struct sk_buff
* Buffer it until we get its meta frame.
*/
if (is_link_local) {
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct sja1105_tagger_private *priv;
struct dsa_switch *ds = dp->ds;
priv = sja1105_tagger_private(ds);
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
- /* Do normal processing. */
- return skb;
-
spin_lock(&priv->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
@@ -403,7 +396,7 @@ static struct sk_buff
if (priv->stampable_skb) {
dev_err_ratelimited(ds->dev,
"Expected meta frame, is %12llx "
- "in the DSA master multicast filter?\n",
+ "in the DSA conduit multicast filter?\n",
SJA1105_META_DMAC);
kfree_skb(priv->stampable_skb);
}
@@ -424,19 +417,13 @@ static struct sk_buff
* frame, which serves no further purpose).
*/
} else if (is_meta) {
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct sja1105_tagger_private *priv;
struct dsa_switch *ds = dp->ds;
struct sk_buff *stampable_skb;
priv = sja1105_tagger_private(ds);
- /* Drop the meta frame if we're not in the right state
- * to process it.
- */
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
- return NULL;
-
spin_lock(&priv->meta_lock);
stampable_skb = priv->stampable_skb;
@@ -472,30 +459,6 @@ static struct sk_buff
return skb;
}
-static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
-{
- struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
-
- return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
-}
-
-static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
-{
- struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
-
- if (on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->state);
- else
- clear_bit(SJA1105_HWTS_RX_EN, &priv->state);
-
- /* Initialize the meta state machine to a known state */
- if (!priv->stampable_skb)
- return;
-
- kfree_skb(priv->stampable_skb);
- priv->stampable_skb = NULL;
-}
-
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
@@ -509,69 +472,45 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb)
return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110;
}
-/* If the VLAN in the packet is a tag_8021q one, set @source_port and
- * @switch_id and strip the header. Otherwise set @vid and keep it in the
- * packet.
- */
-static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port,
- int *switch_id, int *vbid, u16 *vid)
-{
- struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
- u16 vlan_tci;
-
- if (skb_vlan_tag_present(skb))
- vlan_tci = skb_vlan_tag_get(skb);
- else
- vlan_tci = ntohs(hdr->h_vlan_TCI);
-
- if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK))
- return dsa_8021q_rcv(skb, source_port, switch_id, vbid);
-
- /* Try our best with imprecise RX */
- *vid = vlan_tci & VLAN_VID_MASK;
-}
-
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
struct sja1105_meta meta = {0};
struct ethhdr *hdr;
bool is_link_local;
bool is_meta;
- u16 vid;
hdr = eth_hdr(skb);
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
- if (sja1105_skb_has_tag_8021q(skb)) {
- /* Normal traffic path. */
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
- } else if (is_link_local) {
+ if (is_link_local) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.
*/
source_port = hdr->h_dest[3];
switch_id = hdr->h_dest[4];
- /* Clear the DMAC bytes that were mangled by the switch */
- hdr->h_dest[3] = 0;
- hdr->h_dest[4] = 0;
} else if (is_meta) {
sja1105_meta_unpack(skb, &meta);
source_port = meta.source_port;
switch_id = meta.switch_id;
- } else {
- return NULL;
}
- if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else if (source_port == -1 || switch_id == -1)
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
- else
- skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ /* Normal data plane traffic and link-local frames are tagged with
+ * a tag_8021q VLAN which we have to strip
+ */
+ if (sja1105_skb_has_tag_8021q(skb))
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+ else if (source_port == -1 && switch_id == -1)
+ /* Packets with no source information have no chance of
+ * getting accepted, drop them straight away.
+ */
+ return NULL;
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -590,16 +529,16 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
struct sja1105_tagger_data *tagger_data;
- struct net_device *master = skb->dev;
+ struct net_device *conduit = skb->dev;
struct dsa_port *cpu_dp;
struct dsa_switch *ds;
int i;
- cpu_dp = master->dsa_ptr;
+ cpu_dp = conduit->dsa_ptr;
ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
if (!ds) {
net_err_ratelimited("%s: cannot find switch id %d\n",
- master->name, switch_id);
+ conduit->name, switch_id);
return NULL;
}
@@ -666,7 +605,7 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
/* skb->len counts from skb->data, while start_of_padding
* counts from the destination MAC address. Right now skb->data
- * is still as set by the DSA master, so to trim away the
+ * is still as set by the DSA conduit, so to trim away the
* padding and trailer we need to account for the fact that
* skb->data points to skb_mac_header(skb) + ETH_HLEN.
*/
@@ -694,9 +633,8 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct net_device *netdev)
{
- int source_port = -1, switch_id = -1, vbid = -1;
+ int source_port = -1, switch_id = -1, vbid = -1, vid = -1;
bool host_only = false;
- u16 vid = 0;
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
@@ -708,14 +646,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
/* Packets with in-band control extensions might still have RX VLANs */
if (likely(sja1105_skb_has_tag_8021q(skb)))
- sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid);
-
- if (vbid >= 1)
- skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
- else if (source_port == -1 || switch_id == -1)
- skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
- else
- skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id,
+ vid, vbid);
+
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -762,7 +697,6 @@ static void sja1105_disconnect(struct dsa_switch *ds)
static int sja1105_connect(struct dsa_switch *ds)
{
- struct sja1105_tagger_data *tagger_data;
struct sja1105_tagger_private *priv;
struct kthread_worker *xmit_worker;
int err;
@@ -773,7 +707,7 @@ static int sja1105_connect(struct dsa_switch *ds)
spin_lock_init(&priv->meta_lock);
- xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ xmit_worker = kthread_run_worker(0, "dsa%d:%d_xmit",
ds->dst->index, ds->index);
if (IS_ERR(xmit_worker)) {
err = PTR_ERR(xmit_worker);
@@ -782,10 +716,6 @@ static int sja1105_connect(struct dsa_switch *ds)
}
priv->xmit_worker = xmit_worker;
- /* Export functions for switch driver use */
- tagger_data = &priv->data;
- tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
- tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
ds->tagger_data = priv;
return 0;
@@ -800,7 +730,7 @@ static const struct dsa_device_ops sja1105_netdev_ops = {
.disconnect = sja1105_disconnect,
.needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
DSA_TAG_DRIVER(sja1105_netdev_ops);
@@ -828,4 +758,5 @@ static struct dsa_tag_driver *sja1105_tag_driver_array[] = {
module_dsa_tag_drivers(sja1105_tag_driver_array);
+MODULE_DESCRIPTION("DSA tag driver for NXP SJA1105 switches");
MODULE_LICENSE("GPL v2");
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 7361b9106382..4dce24cfe6a7 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -14,12 +14,11 @@
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 4);
trailer[0] = 0x80;
- trailer[1] = 1 << dp->index;
+ trailer[1] = dsa_xmit_port_mask(skb, dev);
trailer[2] = 0x10;
trailer[3] = 0x00;
@@ -41,7 +40,7 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev)
source_port = trailer[1] & 7;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
@@ -59,6 +58,7 @@ static const struct dsa_device_ops trailer_netdev_ops = {
.needed_tailroom = 4,
};
+MODULE_DESCRIPTION("DSA tag driver for switches using a trailer tag");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_TRAILER, TRAILER_NAME);
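
For reference, the trailer handled above is a fixed four-byte block appended to the frame, and only byte 1 carries information. A standalone sketch of the encode/decode arithmetic, assuming nothing beyond what trailer_xmit() and trailer_rcv() show (the meaning of the constant bytes is not documented here):

#include <stdint.h>

/* Sketch only: byte layout used by trailer_xmit()/trailer_rcv() */
static void trailer_encode(uint8_t trailer[4], uint8_t port_mask)
{
	trailer[0] = 0x80;		/* constant, per trailer_xmit() */
	trailer[1] = port_mask;		/* dsa_xmit_port_mask() result */
	trailer[2] = 0x10;
	trailer[3] = 0x00;
}

static unsigned int trailer_decode_source_port(const uint8_t trailer[4])
{
	return trailer[1] & 7;		/* low 3 bits, per trailer_rcv() */
}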
diff --git a/net/dsa/tag_vsc73xx_8021q.c b/net/dsa/tag_vsc73xx_8021q.c
new file mode 100644
index 000000000000..af121a9aff7f
--- /dev/null
+++ b/net/dsa/tag_vsc73xx_8021q.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (C) 2024 Pawel Dembicki <paweldembicki@gmail.com>
+ */
+#include <linux/dsa/8021q.h>
+
+#include "tag.h"
+#include "tag_8021q.h"
+
+#define VSC73XX_8021Q_NAME "vsc73xx-8021q"
+
+static struct sk_buff *
+vsc73xx_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct dsa_port *dp = dsa_user_to_port(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+ u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
+ u8 pcp;
+
+ if (skb->offload_fwd_mark) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br_vlan_enabled(br))
+ return skb;
+
+ tx_vid = dsa_tag_8021q_bridge_vid(bridge_num);
+ }
+
+ pcp = netdev_txq_to_tc(netdev, queue_mapping);
+
+ return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
+ ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+}
+
+static struct sk_buff *
+vsc73xx_rcv(struct sk_buff *skb, struct net_device *netdev)
+{
+ int src_port = -1, switch_id = -1, vbid = -1, vid = -1;
+
+ dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid);
+
+ skb->dev = dsa_tag_8021q_find_user(netdev, src_port, switch_id,
+ vid, vbid);
+ if (!skb->dev) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Couldn't decode source port\n");
+ return NULL;
+ }
+
+ dsa_default_offload_fwd_mark(skb);
+
+ return skb;
+}
+
+static const struct dsa_device_ops vsc73xx_8021q_netdev_ops = {
+ .name = VSC73XX_8021Q_NAME,
+ .proto = DSA_TAG_PROTO_VSC73XX_8021Q,
+ .xmit = vsc73xx_xmit,
+ .rcv = vsc73xx_rcv,
+ .needed_headroom = VLAN_HLEN,
+ .promisc_on_conduit = true,
+};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DSA tag driver for VSC73XX family of switches, using VLAN");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_VSC73XX_8021Q, VSC73XX_8021Q_NAME);
+
+module_dsa_tag_driver(vsc73xx_8021q_netdev_ops);
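
vsc73xx_xmit() above selects the tag_8021q VID (the standalone VID by default, the bridge VID when the frame was forwarded by a VLAN-unaware offloaded bridge) and folds the queue's traffic class into the PCP bits. A standalone sketch of that TCI arithmetic, with VLAN_PRIO_SHIFT (13) taken from if_vlan.h:

#include <stdint.h>

#define VLAN_PRIO_SHIFT	13	/* PCP occupies TCI bits 15:13 */

/* Sketch only: the TCI that vsc73xx_xmit() hands to dsa_8021q_xmit() */
static uint16_t vsc73xx_tx_tci(uint8_t pcp, uint16_t tx_vid)
{
	return (uint16_t)((pcp << VLAN_PRIO_SHIFT) | (tx_vid & 0x0fff));
}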
diff --git a/net/dsa/tag_xrs700x.c b/net/dsa/tag_xrs700x.c
index af19969f9bc4..a05219f702c6 100644
--- a/net/dsa/tag_xrs700x.c
+++ b/net/dsa/tag_xrs700x.c
@@ -13,16 +13,10 @@
static struct sk_buff *xrs700x_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *partner, *dp = dsa_slave_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 1);
- trailer[0] = BIT(dp->index);
-
- if (dp->hsr_dev)
- dsa_hsr_foreach_port(partner, dp->ds, dp->hsr_dev)
- if (partner != dp)
- trailer[0] |= BIT(partner->index);
+ trailer[0] = dsa_xmit_port_mask(skb, dev);
return skb;
}
@@ -39,7 +33,7 @@ static struct sk_buff *xrs700x_rcv(struct sk_buff *skb, struct net_device *dev)
if (source_port < 0)
return NULL;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
@@ -60,6 +54,7 @@ static const struct dsa_device_ops xrs700x_netdev_ops = {
.needed_tailroom = 1,
};
+MODULE_DESCRIPTION("DSA tag driver for XRS700x switches");
MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_XRS700X, XRS700X_NAME);
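
The removed loop shows what dsa_xmit_port_mask() now computes centrally for xrs700x: the bit of the egress port plus the bits of its HSR ring partners. A standalone sketch of that mask, assuming port indexes small enough to fit the one-byte trailer:

#include <stddef.h>
#include <stdint.h>

/* Sketch only: one-byte trailer value (egress port + HSR partner ports) */
static uint8_t xrs700x_port_mask(unsigned int port,
				 const unsigned int *hsr_partners,
				 size_t n_partners)
{
	uint8_t mask = 1U << port;

	for (size_t i = 0; i < n_partners; i++)
		mask |= 1U << hsr_partners[i];

	return mask;
}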
diff --git a/net/dsa/tag_yt921x.c b/net/dsa/tag_yt921x.c
new file mode 100644
index 000000000000..6bbfd42dc5df
--- /dev/null
+++ b/net/dsa/tag_yt921x.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Motorcomm YT921x Switch Extended CPU Port Tagging
+ *
+ * Copyright (c) 2025 David Yang <mmyangfl@gmail.com>
+ *
+ * +----+----+-------+-----+----+---------
+ * | DA | SA | TagET | Tag | ET | Payload ...
+ * +----+----+-------+-----+----+---------
+ *    6    6     2      6    2    N
+ *
+ * Tag Ethertype: CPU_TAG_TPID_TPID (default: ETH_P_YT921X = 0x9988)
+ *   * Hardcoded for the moment, but still configurable. Discuss it if there
+ *     are conflicts somewhere and/or you want to change it for some reason.
+ * Tag:
+ *   2: VLAN Tag
+ *   2: Rx Port
+ *     15b: Rx Port Valid
+ *     14b-11b: Rx Port
+ *     10b-0b: Cmd?
+ *   2: Tx Port(s)
+ *     15b: Tx Port(s) Valid
+ *     10b-0b: Tx Port(s) Mask
+ */
+
+#include <linux/etherdevice.h>
+
+#include "tag.h"
+
+#define YT921X_TAG_NAME "yt921x"
+
+#define YT921X_TAG_LEN 8
+
+#define YT921X_TAG_PORT_EN BIT(15)
+#define YT921X_TAG_RX_PORT_M GENMASK(14, 11)
+#define YT921X_TAG_RX_CMD_M GENMASK(10, 0)
+#define YT921X_TAG_RX_CMD(x) FIELD_PREP(YT921X_TAG_RX_CMD_M, (x))
+#define YT921X_TAG_RX_CMD_FORWARDED 0x80
+#define YT921X_TAG_RX_CMD_UNK_UCAST 0xb2
+#define YT921X_TAG_RX_CMD_UNK_MCAST 0xb4
+#define YT921X_TAG_TX_PORTS GENMASK(10, 0)
+
+static struct sk_buff *
+yt921x_tag_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ __be16 *tag;
+ u16 tx;
+
+ skb_push(skb, YT921X_TAG_LEN);
+ dsa_alloc_etype_header(skb, YT921X_TAG_LEN);
+
+ tag = dsa_etype_header_pos_tx(skb);
+
+ tag[0] = htons(ETH_P_YT921X);
+ /* VLAN tag unrelated when TX */
+ tag[1] = 0;
+ tag[2] = 0;
+ tx = FIELD_PREP(YT921X_TAG_TX_PORTS, dsa_xmit_port_mask(skb, netdev)) |
+ YT921X_TAG_PORT_EN;
+ tag[3] = htons(tx);
+
+ return skb;
+}
+
+static struct sk_buff *
+yt921x_tag_rcv(struct sk_buff *skb, struct net_device *netdev)
+{
+ unsigned int port;
+ __be16 *tag;
+ u16 cmd;
+ u16 rx;
+
+ if (unlikely(!pskb_may_pull(skb, YT921X_TAG_LEN)))
+ return NULL;
+
+ tag = dsa_etype_header_pos_rx(skb);
+
+ if (unlikely(tag[0] != htons(ETH_P_YT921X))) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Unexpected EtherType 0x%04x\n",
+ ntohs(tag[0]));
+ return NULL;
+ }
+
+ /* Locate which port this is coming from */
+ rx = ntohs(tag[2]);
+ if (unlikely((rx & YT921X_TAG_PORT_EN) == 0)) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Unexpected rx tag 0x%04x\n", rx);
+ return NULL;
+ }
+
+ port = FIELD_GET(YT921X_TAG_RX_PORT_M, rx);
+ skb->dev = dsa_conduit_find_user(netdev, 0, port);
+ if (unlikely(!skb->dev)) {
+ dev_warn_ratelimited(&netdev->dev,
+ "Couldn't decode source port %u\n", port);
+ return NULL;
+ }
+
+ cmd = FIELD_GET(YT921X_TAG_RX_CMD_M, rx);
+ switch (cmd) {
+ case YT921X_TAG_RX_CMD_FORWARDED:
+ /* Already forwarded by hardware */
+ dsa_default_offload_fwd_mark(skb);
+ break;
+ case YT921X_TAG_RX_CMD_UNK_UCAST:
+ case YT921X_TAG_RX_CMD_UNK_MCAST:
+ /* NOTE: hardware doesn't distinguish between TRAP (copy to CPU
+ * only) and COPY (forward and copy to CPU). In order to perform
+ * a soft switch, NEVER use COPY action in the switch driver.
+ */
+ break;
+ default:
+ dev_warn_ratelimited(&netdev->dev,
+ "Unexpected rx cmd 0x%02x\n", cmd);
+ break;
+ }
+
+ /* Remove YT921x tag and update checksum */
+ skb_pull_rcsum(skb, YT921X_TAG_LEN);
+ dsa_strip_etype_header(skb, YT921X_TAG_LEN);
+
+ return skb;
+}
+
+static const struct dsa_device_ops yt921x_netdev_ops = {
+ .name = YT921X_TAG_NAME,
+ .proto = DSA_TAG_PROTO_YT921X,
+ .xmit = yt921x_tag_xmit,
+ .rcv = yt921x_tag_rcv,
+ .needed_headroom = YT921X_TAG_LEN,
+};
+
+MODULE_DESCRIPTION("DSA tag driver for Motorcomm YT921x switches");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_YT921X, YT921X_TAG_NAME);
+
+module_dsa_tag_driver(yt921x_netdev_ops);
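
After the TPID and the VLAN word (unused on TX), the YT921x tag reduces to two 16-bit words. A standalone sketch of their packing with plain shifts, mirroring the GENMASK()/FIELD_PREP() definitions in the new file:

#include <stdint.h>

/* Sketch only: pack the TX word and unpack the RX word of the CPU tag */
static uint16_t yt921x_pack_tx(uint16_t port_mask)
{
	return 0x8000 | (port_mask & 0x07ff);	/* PORT_EN | TX_PORTS[10:0] */
}

static void yt921x_unpack_rx(uint16_t rx, unsigned int *port, unsigned int *cmd)
{
	*port = (rx >> 11) & 0xf;	/* RX_PORT, bits 14:11 */
	*cmd  = rx & 0x07ff;		/* RX_CMD, bits 10:0 */
}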
diff --git a/net/dsa/trace.c b/net/dsa/trace.c
new file mode 100644
index 000000000000..1b107165d331
--- /dev/null
+++ b/net/dsa/trace.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2022-2023 NXP
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+void dsa_db_print(const struct dsa_db *db, char buf[DSA_DB_BUFSIZ])
+{
+ switch (db->type) {
+ case DSA_DB_PORT:
+ sprintf(buf, "port %s", db->dp->name);
+ break;
+ case DSA_DB_LAG:
+ sprintf(buf, "lag %s id %d", db->lag.dev->name, db->lag.id);
+ break;
+ case DSA_DB_BRIDGE:
+ sprintf(buf, "bridge %s num %d", db->bridge.dev->name,
+ db->bridge.num);
+ break;
+ default:
+ sprintf(buf, "unknown");
+ break;
+ }
+}
+
+const char *dsa_port_kind(const struct dsa_port *dp)
+{
+ switch (dp->type) {
+ case DSA_PORT_TYPE_USER:
+ return "user";
+ case DSA_PORT_TYPE_CPU:
+ return "cpu";
+ case DSA_PORT_TYPE_DSA:
+ return "dsa";
+ default:
+ return "unused";
+ }
+}
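
A brief usage sketch of dsa_db_print(), as the tracepoints in trace.h consume it; the formatted strings below are examples only:

static void dsa_db_example(const struct dsa_db *db)
{
	char buf[DSA_DB_BUFSIZ];

	dsa_db_print(db, buf);
	/* buf now reads e.g. "port sw0p2", "lag bond0 id 1" or
	 * "bridge br0 num 1", ready to be stored in a trace record.
	 */
}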
diff --git a/net/dsa/trace.h b/net/dsa/trace.h
new file mode 100644
index 000000000000..83f3e5f78491
--- /dev/null
+++ b/net/dsa/trace.h
@@ -0,0 +1,447 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2022-2023 NXP
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dsa
+
+#if !defined(_NET_DSA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _NET_DSA_TRACE_H
+
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/refcount.h>
+#include <linux/tracepoint.h>
+
+/* Enough to fit "bridge %s num %d" where num has 3 digits */
+#define DSA_DB_BUFSIZ (IFNAMSIZ + 16)
+
+void dsa_db_print(const struct dsa_db *db, char buf[DSA_DB_BUFSIZ]);
+const char *dsa_port_kind(const struct dsa_port *dp);
+
+DECLARE_EVENT_CLASS(dsa_port_addr_op_hw,
+
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr, u16 vid,
+ const struct dsa_db *db, int err),
+
+ TP_ARGS(dp, addr, vid, db, err),
+
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dp->ds->dev))
+ __string(kind, dsa_port_kind(dp))
+ __field(int, port)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(kind);
+ __entry->port = dp->index;
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ __entry->err = err;
+ ),
+
+ TP_printk("%s %s port %d addr %pM vid %u db \"%s\" err %d",
+ __get_str(dev), __get_str(kind), __entry->port, __entry->addr,
+ __entry->vid, __entry->db_buf, __entry->err)
+);
+
+/* Add unicast/multicast address to hardware, either on user ports
+ * (where no refcounting is kept), or on shared ports when the entry
+ * is first seen and its refcount is 1.
+ */
+DEFINE_EVENT(dsa_port_addr_op_hw, dsa_fdb_add_hw,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, int err),
+ TP_ARGS(dp, addr, vid, db, err));
+
+DEFINE_EVENT(dsa_port_addr_op_hw, dsa_mdb_add_hw,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, int err),
+ TP_ARGS(dp, addr, vid, db, err));
+
+/* Delete unicast/multicast address from hardware, either on user ports or
+ * when the refcount on shared ports reaches 0
+ */
+DEFINE_EVENT(dsa_port_addr_op_hw, dsa_fdb_del_hw,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, int err),
+ TP_ARGS(dp, addr, vid, db, err));
+
+DEFINE_EVENT(dsa_port_addr_op_hw, dsa_mdb_del_hw,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, int err),
+ TP_ARGS(dp, addr, vid, db, err));
+
+DECLARE_EVENT_CLASS(dsa_port_addr_op_refcount,
+
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr, u16 vid,
+ const struct dsa_db *db, const refcount_t *refcount),
+
+ TP_ARGS(dp, addr, vid, db, refcount),
+
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dp->ds->dev))
+ __string(kind, dsa_port_kind(dp))
+ __field(int, port)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(kind);
+ __entry->port = dp->index;
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ __entry->refcount = refcount_read(refcount);
+ ),
+
+ TP_printk("%s %s port %d addr %pM vid %u db \"%s\" refcount %u",
+ __get_str(dev), __get_str(kind), __entry->port, __entry->addr,
+ __entry->vid, __entry->db_buf, __entry->refcount)
+);
+
+/* Bump the refcount of an existing unicast/multicast address on shared ports */
+DEFINE_EVENT(dsa_port_addr_op_refcount, dsa_fdb_add_bump,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db,
+ const refcount_t *refcount),
+ TP_ARGS(dp, addr, vid, db, refcount));
+
+DEFINE_EVENT(dsa_port_addr_op_refcount, dsa_mdb_add_bump,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db,
+ const refcount_t *refcount),
+ TP_ARGS(dp, addr, vid, db, refcount));
+
+/* Drop the refcount of an existing unicast/multicast address that we
+ * still keep on shared ports
+ */
+DEFINE_EVENT(dsa_port_addr_op_refcount, dsa_fdb_del_drop,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db,
+ const refcount_t *refcount),
+ TP_ARGS(dp, addr, vid, db, refcount));
+
+DEFINE_EVENT(dsa_port_addr_op_refcount, dsa_mdb_del_drop,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db,
+ const refcount_t *refcount),
+ TP_ARGS(dp, addr, vid, db, refcount));
+
+DECLARE_EVENT_CLASS(dsa_port_addr_del_not_found,
+
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr, u16 vid,
+ const struct dsa_db *db),
+
+ TP_ARGS(dp, addr, vid, db),
+
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dp->ds->dev))
+ __string(kind, dsa_port_kind(dp))
+ __field(int, port)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(kind);
+ __entry->port = dp->index;
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ ),
+
+ TP_printk("%s %s port %d addr %pM vid %u db \"%s\"",
+ __get_str(dev), __get_str(kind), __entry->port,
+ __entry->addr, __entry->vid, __entry->db_buf)
+);
+
+/* Attempt to delete a unicast/multicast address on shared ports for which
+ * the delete operation was called more times than the addition
+ */
+DEFINE_EVENT(dsa_port_addr_del_not_found, dsa_fdb_del_not_found,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db),
+ TP_ARGS(dp, addr, vid, db));
+
+DEFINE_EVENT(dsa_port_addr_del_not_found, dsa_mdb_del_not_found,
+ TP_PROTO(const struct dsa_port *dp, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db),
+ TP_ARGS(dp, addr, vid, db));
+
+TRACE_EVENT(dsa_lag_fdb_add_hw,
+
+ TP_PROTO(const struct net_device *lag_dev, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, int err),
+
+ TP_ARGS(lag_dev, addr, vid, db, err),
+
+ TP_STRUCT__entry(
+ __string(dev, lag_dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ __entry->err = err;
+ ),
+
+ TP_printk("%s addr %pM vid %u db \"%s\" err %d",
+ __get_str(dev), __entry->addr, __entry->vid,
+ __entry->db_buf, __entry->err)
+);
+
+TRACE_EVENT(dsa_lag_fdb_add_bump,
+
+ TP_PROTO(const struct net_device *lag_dev, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, const refcount_t *refcount),
+
+ TP_ARGS(lag_dev, addr, vid, db, refcount),
+
+ TP_STRUCT__entry(
+ __string(dev, lag_dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ __entry->refcount = refcount_read(refcount);
+ ),
+
+ TP_printk("%s addr %pM vid %u db \"%s\" refcount %u",
+ __get_str(dev), __entry->addr, __entry->vid,
+ __entry->db_buf, __entry->refcount)
+);
+
+TRACE_EVENT(dsa_lag_fdb_del_hw,
+
+ TP_PROTO(const struct net_device *lag_dev, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, int err),
+
+ TP_ARGS(lag_dev, addr, vid, db, err),
+
+ TP_STRUCT__entry(
+ __string(dev, lag_dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ __entry->err = err;
+ ),
+
+ TP_printk("%s addr %pM vid %u db \"%s\" err %d",
+ __get_str(dev), __entry->addr, __entry->vid,
+ __entry->db_buf, __entry->err)
+);
+
+TRACE_EVENT(dsa_lag_fdb_del_drop,
+
+ TP_PROTO(const struct net_device *lag_dev, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db, const refcount_t *refcount),
+
+ TP_ARGS(lag_dev, addr, vid, db, refcount),
+
+ TP_STRUCT__entry(
+ __string(dev, lag_dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ __entry->refcount = refcount_read(refcount);
+ ),
+
+ TP_printk("%s addr %pM vid %u db \"%s\" refcount %u",
+ __get_str(dev), __entry->addr, __entry->vid,
+ __entry->db_buf, __entry->refcount)
+);
+
+TRACE_EVENT(dsa_lag_fdb_del_not_found,
+
+ TP_PROTO(const struct net_device *lag_dev, const unsigned char *addr,
+ u16 vid, const struct dsa_db *db),
+
+ TP_ARGS(lag_dev, addr, vid, db),
+
+ TP_STRUCT__entry(
+ __string(dev, lag_dev->name)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __array(char, db_buf, DSA_DB_BUFSIZ)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ ether_addr_copy(__entry->addr, addr);
+ __entry->vid = vid;
+ dsa_db_print(db, __entry->db_buf);
+ ),
+
+ TP_printk("%s addr %pM vid %u db \"%s\"",
+ __get_str(dev), __entry->addr, __entry->vid, __entry->db_buf)
+);
+
+DECLARE_EVENT_CLASS(dsa_vlan_op_hw,
+
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan, int err),
+
+ TP_ARGS(dp, vlan, err),
+
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dp->ds->dev))
+ __string(kind, dsa_port_kind(dp))
+ __field(int, port)
+ __field(u16, vid)
+ __field(u16, flags)
+ __field(bool, changed)
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(kind);
+ __entry->port = dp->index;
+ __entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;
+ __entry->changed = vlan->changed;
+ __entry->err = err;
+ ),
+
+ TP_printk("%s %s port %d vid %u%s%s%s",
+ __get_str(dev), __get_str(kind), __entry->port, __entry->vid,
+ __entry->flags & BRIDGE_VLAN_INFO_PVID ? " pvid" : "",
+ __entry->flags & BRIDGE_VLAN_INFO_UNTAGGED ? " untagged" : "",
+ __entry->changed ? " (changed)" : "")
+);
+
+DEFINE_EVENT(dsa_vlan_op_hw, dsa_vlan_add_hw,
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan, int err),
+ TP_ARGS(dp, vlan, err));
+
+DEFINE_EVENT(dsa_vlan_op_hw, dsa_vlan_del_hw,
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan, int err),
+ TP_ARGS(dp, vlan, err));
+
+DECLARE_EVENT_CLASS(dsa_vlan_op_refcount,
+
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ const refcount_t *refcount),
+
+ TP_ARGS(dp, vlan, refcount),
+
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dp->ds->dev))
+ __string(kind, dsa_port_kind(dp))
+ __field(int, port)
+ __field(u16, vid)
+ __field(u16, flags)
+ __field(bool, changed)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(kind);
+ __entry->port = dp->index;
+ __entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;
+ __entry->changed = vlan->changed;
+ __entry->refcount = refcount_read(refcount);
+ ),
+
+ TP_printk("%s %s port %d vid %u%s%s%s refcount %u",
+ __get_str(dev), __get_str(kind), __entry->port, __entry->vid,
+ __entry->flags & BRIDGE_VLAN_INFO_PVID ? " pvid" : "",
+ __entry->flags & BRIDGE_VLAN_INFO_UNTAGGED ? " untagged" : "",
+ __entry->changed ? " (changed)" : "", __entry->refcount)
+);
+
+DEFINE_EVENT(dsa_vlan_op_refcount, dsa_vlan_add_bump,
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ const refcount_t *refcount),
+ TP_ARGS(dp, vlan, refcount));
+
+DEFINE_EVENT(dsa_vlan_op_refcount, dsa_vlan_del_drop,
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ const refcount_t *refcount),
+ TP_ARGS(dp, vlan, refcount));
+
+TRACE_EVENT(dsa_vlan_del_not_found,
+
+ TP_PROTO(const struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan),
+
+ TP_ARGS(dp, vlan),
+
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dp->ds->dev))
+ __string(kind, dsa_port_kind(dp))
+ __field(int, port)
+ __field(u16, vid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(kind);
+ __entry->port = dp->index;
+ __entry->vid = vlan->vid;
+ ),
+
+ TP_printk("%s %s port %d vid %u",
+ __get_str(dev), __get_str(kind), __entry->port, __entry->vid)
+);
+
+#endif /* _NET_DSA_TRACE_H */
+
+/* We don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
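
Each DEFINE_EVENT()/TRACE_EVENT() above generates a trace_<name>() wrapper that the DSA core can call; when the event is disabled the call is a near no-op. A hedged sketch of how one of them would be recorded after programming an FDB entry, assuming the current ->port_fdb_add() signature (the real call site in the core may differ):

/* Sketch only: program an FDB entry and trace the hardware result */
static int dsa_port_fdb_add_traced(struct dsa_port *dp,
				   const unsigned char *addr, u16 vid,
				   struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	err = ds->ops->port_fdb_add(ds, dp->index, addr, vid, db);
	trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);

	return err;
}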
diff --git a/net/dsa/slave.c b/net/dsa/user.c
index aab79c355224..f59d66f0975d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/user.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * net/dsa/slave.c - Slave device handling
+ * net/dsa/user.c - user device handling
* Copyright (c) 2008-2009 Marvell Semiconductor
*/
@@ -21,13 +21,15 @@
#include <linux/if_hsr.h>
#include <net/dcbnl.h>
#include <linux/netpoll.h>
+#include <linux/string.h>
+#include "conduit.h"
#include "dsa.h"
-#include "port.h"
-#include "master.h"
#include "netlink.h"
-#include "slave.h"
+#include "port.h"
+#include "switch.h"
#include "tag.h"
+#include "user.h"
struct dsa_switchdev_event_work {
struct net_device *dev;
@@ -57,6 +59,12 @@ struct dsa_standalone_event_work {
u16 vid;
};
+struct dsa_host_vlan_rx_filtering_ctx {
+ struct net_device *dev;
+ const unsigned char *addr;
+ enum dsa_standalone_event event;
+};
+
static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
{
return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
@@ -71,13 +79,13 @@ static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
!ds->needs_standalone_vlan_filtering;
}
-static void dsa_slave_standalone_event_work(struct work_struct *work)
+static void dsa_user_standalone_event_work(struct work_struct *work)
{
struct dsa_standalone_event_work *standalone_work =
container_of(work, struct dsa_standalone_event_work, work);
const unsigned char *addr = standalone_work->addr;
struct net_device *dev = standalone_work->dev;
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_mdb mdb;
struct dsa_switch *ds = dp->ds;
u16 vid = standalone_work->vid;
@@ -132,10 +140,10 @@ static void dsa_slave_standalone_event_work(struct work_struct *work)
kfree(standalone_work);
}
-static int dsa_slave_schedule_standalone_work(struct net_device *dev,
- enum dsa_standalone_event event,
- const unsigned char *addr,
- u16 vid)
+static int dsa_user_schedule_standalone_work(struct net_device *dev,
+ enum dsa_standalone_event event,
+ const unsigned char *addr,
+ u16 vid)
{
struct dsa_standalone_event_work *standalone_work;
@@ -143,7 +151,7 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
if (!standalone_work)
return -ENOMEM;
- INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
+ INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
standalone_work->event = event;
standalone_work->dev = dev;
@@ -155,75 +163,129 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
return 0;
}
-static int dsa_slave_sync_uc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_host_vlan_rx_filtering(void *arg, int vid)
+{
+ struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
+
+ return dsa_user_schedule_standalone_work(ctx->dev, ctx->event,
+ ctx->addr, vid);
+}
+
+static int dsa_user_vlan_for_each(struct net_device *dev,
+ int (*cb)(void *arg, int vid), void *arg)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_vlan *v;
+ int err;
+
+ lockdep_assert_held(&dev->addr_list_lock);
+
+ err = cb(arg, 0);
+ if (err)
+ return err;
+
+ list_for_each_entry(v, &dp->user_vlans, list) {
+ err = cb(arg, v->vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dsa_user_sync_uc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_host_vlan_rx_filtering_ctx ctx = {
+ .dev = dev,
+ .addr = addr,
+ .event = DSA_UC_ADD,
+ };
- dev_uc_add(master, addr);
+ dev_uc_add(conduit, addr);
if (!dsa_switch_supports_uc_filtering(dp->ds))
return 0;
- return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
+ &ctx);
}
-static int dsa_slave_unsync_uc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_unsync_uc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_host_vlan_rx_filtering_ctx ctx = {
+ .dev = dev,
+ .addr = addr,
+ .event = DSA_UC_DEL,
+ };
- dev_uc_del(master, addr);
+ dev_uc_del(conduit, addr);
if (!dsa_switch_supports_uc_filtering(dp->ds))
return 0;
- return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
+ &ctx);
}
-static int dsa_slave_sync_mc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_sync_mc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_host_vlan_rx_filtering_ctx ctx = {
+ .dev = dev,
+ .addr = addr,
+ .event = DSA_MC_ADD,
+ };
- dev_mc_add(master, addr);
+ dev_mc_add(conduit, addr);
if (!dsa_switch_supports_mc_filtering(dp->ds))
return 0;
- return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
+ &ctx);
}
-static int dsa_slave_unsync_mc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_unsync_mc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_host_vlan_rx_filtering_ctx ctx = {
+ .dev = dev,
+ .addr = addr,
+ .event = DSA_MC_DEL,
+ };
- dev_mc_del(master, addr);
+ dev_mc_del(conduit, addr);
if (!dsa_switch_supports_mc_filtering(dp->ds))
return 0;
- return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
+ &ctx);
}
-void dsa_slave_sync_ha(struct net_device *dev)
+void dsa_user_sync_ha(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(dev);
netdev_for_each_synced_mc_addr(ha, dev)
- dsa_slave_sync_mc(dev, ha->addr);
+ dsa_user_sync_mc(dev, ha->addr);
netdev_for_each_synced_uc_addr(ha, dev)
- dsa_slave_sync_uc(dev, ha->addr);
+ dsa_user_sync_uc(dev, ha->addr);
netif_addr_unlock_bh(dev);
@@ -232,19 +294,19 @@ void dsa_slave_sync_ha(struct net_device *dev)
dsa_flush_workqueue();
}
-void dsa_slave_unsync_ha(struct net_device *dev)
+void dsa_user_unsync_ha(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(dev);
netdev_for_each_synced_uc_addr(ha, dev)
- dsa_slave_unsync_uc(dev, ha->addr);
+ dsa_user_unsync_uc(dev, ha->addr);
netdev_for_each_synced_mc_addr(ha, dev)
- dsa_slave_unsync_mc(dev, ha->addr);
+ dsa_user_unsync_mc(dev, ha->addr);
netif_addr_unlock_bh(dev);
@@ -253,8 +315,8 @@ void dsa_slave_unsync_ha(struct net_device *dev)
dsa_flush_workqueue();
}
-/* slave mii_bus handling ***************************************************/
-static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
+/* user mii_bus handling ***************************************************/
+static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg)
{
struct dsa_switch *ds = bus->priv;
@@ -264,7 +326,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
return 0xffff;
}
-static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
+static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct dsa_switch *ds = bus->priv;
@@ -274,120 +336,141 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
return 0;
}
-void dsa_slave_mii_bus_init(struct dsa_switch *ds)
+void dsa_user_mii_bus_init(struct dsa_switch *ds)
{
- ds->slave_mii_bus->priv = (void *)ds;
- ds->slave_mii_bus->name = "dsa slave smi";
- ds->slave_mii_bus->read = dsa_slave_phy_read;
- ds->slave_mii_bus->write = dsa_slave_phy_write;
- snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
+ ds->user_mii_bus->priv = (void *)ds;
+ ds->user_mii_bus->name = "dsa user smi";
+ ds->user_mii_bus->read = dsa_user_phy_read;
+ ds->user_mii_bus->write = dsa_user_phy_write;
+ snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
ds->dst->index, ds->index);
- ds->slave_mii_bus->parent = ds->dev;
- ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
+ ds->user_mii_bus->parent = ds->dev;
+ ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
-/* slave device handling ****************************************************/
-static int dsa_slave_get_iflink(const struct net_device *dev)
+/* user device handling ****************************************************/
+static int dsa_user_get_iflink(const struct net_device *dev)
{
- return dsa_slave_to_master(dev)->ifindex;
+ return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}
-static int dsa_slave_open(struct net_device *dev)
+int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err;
- err = dev_open(master, NULL);
- if (err < 0) {
- netdev_err(dev, "failed to open master %s\n", master->name);
- goto out;
- }
-
if (dsa_switch_supports_uc_filtering(ds)) {
- err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0);
+ err = dsa_port_standalone_host_fdb_add(dp, addr, 0);
if (err)
goto out;
}
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
- err = dev_uc_add(master, dev->dev_addr);
+ if (!ether_addr_equal(addr, conduit->dev_addr)) {
+ err = dev_uc_add(conduit, addr);
if (err < 0)
goto del_host_addr;
}
- err = dsa_port_enable_rt(dp, dev->phydev);
- if (err)
- goto del_unicast;
-
return 0;
-del_unicast:
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
- dev_uc_del(master, dev->dev_addr);
del_host_addr:
if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+ dsa_port_standalone_host_fdb_del(dp, addr, 0);
out:
return err;
}
-static int dsa_slave_close(struct net_device *dev)
+void dsa_user_host_uc_uninstall(struct net_device *dev)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
- dsa_port_disable_rt(dp);
-
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
- dev_uc_del(master, dev->dev_addr);
+ if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
+ dev_uc_del(conduit, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+}
+
+static int dsa_user_open(struct net_device *dev)
+{
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ int err;
+
+ err = dev_open(conduit, NULL);
+ if (err < 0) {
+ netdev_err(dev, "failed to open conduit %s\n", conduit->name);
+ goto out;
+ }
+
+ err = dsa_user_host_uc_install(dev, dev->dev_addr);
+ if (err)
+ goto out;
+
+ err = dsa_port_enable_rt(dp, dev->phydev);
+ if (err)
+ goto out_del_host_uc;
return 0;
+
+out_del_host_uc:
+ dsa_user_host_uc_uninstall(dev);
+out:
+ return err;
}
-static void dsa_slave_manage_host_flood(struct net_device *dev)
+static int dsa_user_close(struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+
+ dsa_port_disable_rt(dp);
+
+ dsa_user_host_uc_uninstall(dev);
+
+ return 0;
+}
+
+static void dsa_user_manage_host_flood(struct net_device *dev)
{
bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
bool uc = dev->flags & IFF_PROMISC;
dsa_port_set_host_flood(dp, uc, mc);
}
-static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
+static void dsa_user_change_rx_flags(struct net_device *dev, int change)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (change & IFF_ALLMULTI)
- dev_set_allmulti(master,
+ dev_set_allmulti(conduit,
dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
- dev_set_promiscuity(master,
+ dev_set_promiscuity(conduit,
dev->flags & IFF_PROMISC ? 1 : -1);
if (dsa_switch_supports_uc_filtering(ds) &&
dsa_switch_supports_mc_filtering(ds))
- dsa_slave_manage_host_flood(dev);
+ dsa_user_manage_host_flood(dev);
}
-static void dsa_slave_set_rx_mode(struct net_device *dev)
+static void dsa_user_set_rx_mode(struct net_device *dev)
{
- __dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
- __dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
+ __dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc);
+ __dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc);
}
-static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
+static int dsa_user_set_mac_address(struct net_device *dev, void *a)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct sockaddr *addr = a;
int err;
@@ -395,43 +478,32 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- /* If the port is down, the address isn't synced yet to hardware or
- * to the DSA master, so there is nothing to change.
- */
- if (!(dev->flags & IFF_UP))
- goto out_change_dev_addr;
-
- if (dsa_switch_supports_uc_filtering(ds)) {
- err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0);
+ if (ds->ops->port_set_mac_address) {
+ err = ds->ops->port_set_mac_address(ds, dp->index,
+ addr->sa_data);
if (err)
return err;
}
- if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
- err = dev_uc_add(master, addr->sa_data);
- if (err < 0)
- goto del_unicast;
- }
+ /* If the port is down, the address isn't synced yet to hardware or
+ * to the DSA conduit, so there is nothing to change.
+ */
+ if (!(dev->flags & IFF_UP))
+ goto out_change_dev_addr;
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
- dev_uc_del(master, dev->dev_addr);
+ err = dsa_user_host_uc_install(dev, addr->sa_data);
+ if (err)
+ return err;
- if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
+ dsa_user_host_uc_uninstall(dev);
out_change_dev_addr:
eth_hw_addr_set(dev, addr->sa_data);
return 0;
-
-del_unicast:
- if (dsa_switch_supports_uc_filtering(ds))
- dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0);
-
- return err;
}
-struct dsa_slave_dump_ctx {
+struct dsa_user_dump_ctx {
struct net_device *dev;
struct sk_buff *skb;
struct netlink_callback *cb;
@@ -439,16 +511,17 @@ struct dsa_slave_dump_ctx {
};
static int
-dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data)
+dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
{
- struct dsa_slave_dump_ctx *dump = data;
+ struct dsa_user_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
@@ -483,12 +556,12 @@ nla_put_failure:
}
static int
-dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev, struct net_device *filter_dev,
- int *idx)
+dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, struct net_device *filter_dev,
+ int *idx)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_dump_ctx dump = {
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_user_dump_ctx dump = {
.dev = dev,
.skb = skb,
.cb = cb,
@@ -496,38 +569,24 @@ dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
};
int err;
- err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
+ err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump);
*idx = dump.idx;
return err;
}
-static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->dp->ds;
- int port = p->dp->index;
-
- /* Pass through to switch driver if it supports timestamping */
- switch (cmd) {
- case SIOCGHWTSTAMP:
- if (ds->ops->port_hwtstamp_get)
- return ds->ops->port_hwtstamp_get(ds, port, ifr);
- break;
- case SIOCSHWTSTAMP:
- if (ds->ops->port_hwtstamp_set)
- return ds->ops->port_hwtstamp_set(ds, port, ifr);
- break;
- }
+ struct dsa_user_priv *p = netdev_priv(dev);
return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
-static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
- const struct switchdev_attr *attr,
- struct netlink_ext_ack *extack)
+static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
int ret;
if (ctx && ctx != dp)
@@ -594,13 +653,13 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
/* Must be called under rcu_read_lock() */
static int
-dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
- const struct switchdev_obj_port_vlan *vlan)
+dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct net_device *upper_dev;
struct list_head *iter;
- netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
u16 vid;
if (!is_vlan_dev(upper_dev))
@@ -614,11 +673,11 @@ dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
return 0;
}
-static int dsa_slave_vlan_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int dsa_user_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
int err;
@@ -634,7 +693,7 @@ static int dsa_slave_vlan_add(struct net_device *dev,
*/
if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
rcu_read_lock();
- err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
+ err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan);
rcu_read_unlock();
if (err) {
NL_SET_ERR_MSG_MOD(extack,
@@ -649,11 +708,11 @@ static int dsa_slave_vlan_add(struct net_device *dev,
/* Offload a VLAN installed on the bridge or on a foreign interface by
* installing it as a VLAN towards the CPU port.
*/
-static int dsa_slave_host_vlan_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int dsa_user_host_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan vlan;
/* Do nothing if this is a software bridge */
@@ -675,11 +734,11 @@ static int dsa_slave_host_vlan_add(struct net_device *dev,
return dsa_port_host_vlan_add(dp, &vlan, extack);
}
-static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
int err;
if (ctx && ctx != dp)
@@ -700,9 +759,9 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- err = dsa_slave_vlan_add(dev, obj, extack);
+ err = dsa_user_vlan_add(dev, obj, extack);
else
- err = dsa_slave_host_vlan_add(dev, obj, extack);
+ err = dsa_user_host_vlan_add(dev, obj, extack);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
@@ -725,10 +784,10 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
return err;
}
-static int dsa_slave_vlan_del(struct net_device *dev,
- const struct switchdev_obj *obj)
+static int dsa_user_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
if (dsa_port_skip_vlan_configuration(dp))
@@ -739,10 +798,10 @@ static int dsa_slave_vlan_del(struct net_device *dev,
return dsa_port_vlan_del(dp, vlan);
}
-static int dsa_slave_host_vlan_del(struct net_device *dev,
- const struct switchdev_obj *obj)
+static int dsa_user_host_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
/* Do nothing if this is a software bridge */
@@ -757,10 +816,10 @@ static int dsa_slave_host_vlan_del(struct net_device *dev,
return dsa_port_host_vlan_del(dp, vlan);
}
-static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
- const struct switchdev_obj *obj)
+static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
int err;
if (ctx && ctx != dp)
@@ -781,9 +840,9 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- err = dsa_slave_vlan_del(dev, obj);
+ err = dsa_user_vlan_del(dev, obj);
else
- err = dsa_slave_host_vlan_del(dev, obj);
+ err = dsa_user_host_vlan_del(dev, obj);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
@@ -806,11 +865,11 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
- struct sk_buff *skb)
+static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
return netpoll_send_skb(p->netpoll, skb);
#else
@@ -819,12 +878,12 @@ static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
#endif
}
-static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
+static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NOBPF))
return;
if (!ds->ops->port_txtstamp)
@@ -839,45 +898,21 @@ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
* tag to be successfully transmitted
*/
if (unlikely(netpoll_tx_running(dev)))
- return dsa_slave_netpoll_send_skb(dev, skb);
+ return dsa_user_netpoll_send_skb(dev, skb);
/* Queue the SKB for transmission on the parent interface, but
* do not modify its EtherType
*/
- skb->dev = dsa_slave_to_master(dev);
+ skb->dev = dsa_user_to_conduit(dev);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
-static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int needed_headroom = dev->needed_headroom;
- int needed_tailroom = dev->needed_tailroom;
-
- /* For tail taggers, we need to pad short frames ourselves, to ensure
- * that the tail tag does not fail at its role of being at the end of
- * the packet, once the master interface pads the frame. Account for
- * that pad length here, and pad later.
- */
- if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
- needed_tailroom += ETH_ZLEN - skb->len;
- /* skb_headroom() returns unsigned int... */
- needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
- needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
-
- if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
- /* No reallocation needed, yay! */
- return 0;
-
- return pskb_expand_head(skb, needed_headroom, needed_tailroom,
- GFP_ATOMIC);
-}
-
-static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
dev_sw_netstats_tx_add(dev, 1, skb->len);
@@ -887,13 +922,14 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
/* Handle tx timestamp if any */
dsa_skb_tx_timestamp(p, skb);
- if (dsa_realloc_skb(skb, dev)) {
+ if (skb_ensure_writable_head_tail(skb, dev)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* needed_tailroom should still be 'warm' in the cache line from
- * dsa_realloc_skb(), which has also ensured that padding is safe.
+ * skb_ensure_writable_head_tail(), which has also ensured that
+ * padding is safe.
*/
if (dev->needed_tailroom)
eth_skb_pad(skb);
@@ -912,17 +948,17 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
/* ethtool operations *******************************************************/
-static void dsa_slave_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
+static void dsa_user_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
-static int dsa_slave_get_regs_len(struct net_device *dev)
+static int dsa_user_get_regs_len(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_regs_len)
@@ -932,25 +968,25 @@ static int dsa_slave_get_regs_len(struct net_device *dev)
}
static void
-dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_regs)
ds->ops->get_regs(ds, dp->index, regs, _p);
}
-static int dsa_slave_nway_reset(struct net_device *dev)
+static int dsa_user_nway_reset(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_nway_reset(dp->pl);
}
-static int dsa_slave_get_eeprom_len(struct net_device *dev)
+static int dsa_user_get_eeprom_len(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->cd && ds->cd->eeprom_len)
@@ -962,10 +998,10 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev)
return 0;
}
-static int dsa_slave_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+static int dsa_user_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eeprom)
@@ -974,10 +1010,10 @@ static int dsa_slave_get_eeprom(struct net_device *dev,
return -EOPNOTSUPP;
}
-static int dsa_slave_set_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+static int dsa_user_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->set_eeprom)
@@ -986,33 +1022,30 @@ static int dsa_slave_set_eeprom(struct net_device *dev,
return -EOPNOTSUPP;
}
-static void dsa_slave_get_strings(struct net_device *dev,
- uint32_t stringset, uint8_t *data)
+static void dsa_user_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (stringset == ETH_SS_STATS) {
- int len = ETH_GSTRING_LEN;
-
- strncpy(data, "tx_packets", len);
- strncpy(data + len, "tx_bytes", len);
- strncpy(data + 2 * len, "rx_packets", len);
- strncpy(data + 3 * len, "rx_bytes", len);
+ ethtool_puts(&data, "tx_packets");
+ ethtool_puts(&data, "tx_bytes");
+ ethtool_puts(&data, "rx_packets");
+ ethtool_puts(&data, "rx_bytes");
if (ds->ops->get_strings)
- ds->ops->get_strings(ds, dp->index, stringset,
- data + 4 * len);
+ ds->ops->get_strings(ds, dp->index, stringset, data);
} else if (stringset == ETH_SS_TEST) {
net_selftest_get_strings(data);
}
}
-static void dsa_slave_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+static void dsa_user_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct pcpu_sw_netstats *s;
unsigned int start;
@@ -1038,9 +1071,9 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
-static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
+static int dsa_user_get_sset_count(struct net_device *dev, int sset)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (sset == ETH_SS_STATS) {
@@ -1060,20 +1093,20 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
-static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
- struct ethtool_eth_phy_stats *phy_stats)
+static void dsa_user_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_phy_stats)
ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}
-static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
- struct ethtool_eth_mac_stats *mac_stats)
+static void dsa_user_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_mac_stats)
@@ -1081,10 +1114,10 @@ static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
}
static void
-dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
- struct ethtool_eth_ctrl_stats *ctrl_stats)
+dsa_user_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_ctrl_stats)
@@ -1092,21 +1125,31 @@ dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
}
static void
-dsa_slave_get_rmon_stats(struct net_device *dev,
- struct ethtool_rmon_stats *rmon_stats,
- const struct ethtool_rmon_hist_range **ranges)
+dsa_user_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_rmon_stats)
ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}
-static void dsa_slave_net_selftest(struct net_device *ndev,
- struct ethtool_test *etest, u64 *buf)
+static void dsa_user_get_ts_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_ts_stats)
+ ds->ops->get_ts_stats(ds, dp->index, ts_stats);
+}
+
+static void dsa_user_net_selftest(struct net_device *ndev,
+ struct ethtool_test *etest, u64 *buf)
{
- struct dsa_port *dp = dsa_slave_to_port(ndev);
+ struct dsa_port *dp = dsa_user_to_port(ndev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->self_test) {
@@ -1117,9 +1160,43 @@ static void dsa_slave_net_selftest(struct net_device *ndev,
net_selftest(ndev, etest, buf);
}
-static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+static int dsa_user_get_mm(struct net_device *dev,
+ struct ethtool_mm_state *state)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->get_mm)
+ return -EOPNOTSUPP;
+
+ return ds->ops->get_mm(ds, dp->index, state);
+}
+
+static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->set_mm)
+ return -EOPNOTSUPP;
+
+ return ds->ops->set_mm(ds, dp->index, cfg, extack);
+}
+
+static void dsa_user_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (ds->ops->get_mm_stats)
+ ds->ops->get_mm_stats(ds, dp->index, stats);
+}
+
+static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
phylink_ethtool_get_wol(dp->pl, w);
@@ -1128,9 +1205,9 @@ static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
ds->ops->get_wol(ds, dp->index, w);
}
-static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret = -EOPNOTSUPP;
@@ -1142,94 +1219,102 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
return ret;
}
-static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret;
- /* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev || !dp->pl)
- return -ENODEV;
-
- if (!ds->ops->set_mac_eee)
+ /* Check whether the switch supports EEE */
+ if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
return -EOPNOTSUPP;
- ret = ds->ops->set_mac_eee(ds, dp->index, e);
- if (ret)
- return ret;
+ /* If the port is using phylink managed EEE, then an unimplemented
+ * set_mac_eee() is permissible.
+ */
+ if (!phylink_mac_implements_lpi(ds->phylink_mac_ops)) {
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!ds->ops->set_mac_eee)
+ return -EOPNOTSUPP;
+
+ ret = ds->ops->set_mac_eee(ds, dp->index, e);
+ if (ret)
+ return ret;
+ } else if (ds->ops->set_mac_eee) {
+ ret = ds->ops->set_mac_eee(ds, dp->index, e);
+ if (ret)
+ return ret;
+ }
return phylink_ethtool_set_eee(dp->pl, e);
}
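The reworked set_eee() path above first gates on the switch-level support_eee() check, then only insists on a PHY and a set_mac_eee() hook when phylink does not manage MAC LPI itself. A minimal plain-C sketch of that decision flow, with capability booleans standing in for the real ops (not the DSA API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct port_caps {
	bool switch_supports_eee;	/* ds->ops->support_eee(ds, port) */
	bool phylink_managed_lpi;	/* phylink implements MAC LPI */
	bool has_set_mac_eee;		/* driver provides set_mac_eee() */
	bool has_phydev;		/* port has an attached PHY */
};

static int set_eee(const struct port_caps *c)
{
	if (!c->switch_supports_eee)
		return -EOPNOTSUPP;

	if (!c->phylink_managed_lpi) {
		/* Legacy path: PHY and MAC hook are both mandatory. */
		if (!c->has_phydev)
			return -ENODEV;
		if (!c->has_set_mac_eee)
			return -EOPNOTSUPP;
	}
	/* With phylink-managed LPI, a missing set_mac_eee() is fine. */
	return 0;	/* would continue into phylink_ethtool_set_eee() */
}

int main(void)
{
	struct port_caps c = { true, true, false, false };

	printf("phylink-managed LPI, no set_mac_eee: %d\n", set_eee(&c));
	return 0;
}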
-static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
- int ret;
-
- /* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev || !dp->pl)
- return -ENODEV;
- if (!ds->ops->get_mac_eee)
+ /* Check whether the switch supports EEE */
+ if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index))
return -EOPNOTSUPP;
- ret = ds->ops->get_mac_eee(ds, dp->index, e);
- if (ret)
- return ret;
+ /* Port's PHY and MAC both need to be EEE capable */
+ if (!dev->phydev)
+ return -ENODEV;
return phylink_ethtool_get_eee(dp->pl, e);
}
-static int dsa_slave_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
+static int dsa_user_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_ksettings_get(dp->pl, cmd);
}
-static int dsa_slave_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
+static int dsa_user_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_ksettings_set(dp->pl, cmd);
}
-static void dsa_slave_get_pause_stats(struct net_device *dev,
- struct ethtool_pause_stats *pause_stats)
+static void dsa_user_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_pause_stats)
ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}
-static void dsa_slave_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
+static void dsa_user_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
phylink_ethtool_get_pauseparam(dp->pl, pause);
}
-static int dsa_slave_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
+static int dsa_user_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_set_pauseparam(dp->pl, pause);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int dsa_slave_netpoll_setup(struct net_device *dev,
- struct netpoll_info *ni)
+static int dsa_user_netpoll_setup(struct net_device *dev)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct netpoll *netpoll;
int err = 0;
@@ -1237,7 +1322,7 @@ static int dsa_slave_netpoll_setup(struct net_device *dev,
if (!netpoll)
return -ENOMEM;
- err = __netpoll_setup(netpoll, master);
+ err = __netpoll_setup(netpoll, conduit);
if (err) {
kfree(netpoll);
goto out;
@@ -1248,9 +1333,9 @@ out:
return err;
}
-static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+static void dsa_user_netpoll_cleanup(struct net_device *dev)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct netpoll *netpoll = p->netpoll;
if (!netpoll)
@@ -1261,15 +1346,15 @@ static void dsa_slave_netpoll_cleanup(struct net_device *dev)
__netpoll_free(netpoll);
}
-static void dsa_slave_poll_controller(struct net_device *dev)
+static void dsa_user_poll_controller(struct net_device *dev)
{
}
#endif
static struct dsa_mall_tc_entry *
-dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
+dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
@@ -1280,13 +1365,13 @@ dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
}
static int
-dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+dsa_user_add_cls_matchall_mirred(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress, bool ingress_target)
{
struct netlink_ext_ack *extack = cls->common.extack;
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_mall_mirror_tc_entry *mirror;
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
@@ -1294,11 +1379,19 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
struct dsa_port *to_dp;
int err;
- if (!ds->ops->port_mirror_add)
+ if (cls->common.protocol != htons(ETH_P_ALL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload \"protocol all\" matchall filter");
return -EOPNOTSUPP;
+ }
- if (!flow_action_basic_hw_stats_check(&cls->rule->action,
- cls->common.extack))
+ if (!ds->ops->port_mirror_add) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch does not support mirroring operation");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
return -EOPNOTSUPP;
act = &cls->rule->action.entries[0];
@@ -1306,8 +1399,36 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
if (!act->dev)
return -EINVAL;
- if (!dsa_slave_dev_check(act->dev))
+ if (dsa_user_dev_check(act->dev)) {
+ if (ingress_target) {
+ /* We can only fulfill this using software assist */
+ if (cls->common.skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only mirred to ingress of DSA user port if filter also runs in software");
+ return -EOPNOTSUPP;
+ }
+ to_dp = dp->cpu_dp;
+ } else {
+ to_dp = dsa_user_to_port(act->dev);
+ }
+ } else {
+ /* Handle mirroring to foreign target ports as a mirror towards
+ * the CPU. The software tc rule will take the packets from
+ * there.
+ */
+ if (cls->common.skip_sw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only mirred to CPU if filter also runs in software");
+ return -EOPNOTSUPP;
+ }
+ to_dp = dp->cpu_dp;
+ }
+
+ if (dp->ds != to_dp->ds) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cross-chip mirroring not implemented");
return -EOPNOTSUPP;
+ }
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
if (!mall_tc_entry)
@@ -1316,9 +1437,6 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
mall_tc_entry->cookie = cls->cookie;
mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
-
- to_dp = dsa_slave_to_port(act->dev);
-
mirror->to_local_port = to_dp->index;
mirror->ingress = ingress;
@@ -1334,13 +1452,13 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
}
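The mirred hunks above resolve the mirror target in three cases: a DSA user port on the same switch can be mirrored to directly, while ingress-of-user-port and foreign targets are redirected to the CPU port and require the filter to also run in software; cross-chip targets are rejected. A standalone sketch of that resolution (structure and field names invented for the example, not the DSA code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ex_port {
	int sw_index;		/* which switch the port belongs to */
	int port_index;
};

static int resolve_mirror_target(const struct ex_port *from,
				 const struct ex_port *target_user, /* NULL if foreign */
				 const struct ex_port *cpu_port,
				 bool mirror_to_ingress, bool skip_sw,
				 const struct ex_port **to)
{
	if (!target_user || mirror_to_ingress) {
		/* Needs software assist: mirror towards the CPU port. */
		if (skip_sw)
			return -EOPNOTSUPP;
		*to = cpu_port;
	} else {
		*to = target_user;
	}

	/* Cross-chip mirroring is not implemented. */
	if ((*to)->sw_index != from->sw_index)
		return -EOPNOTSUPP;

	return 0;
}

int main(void)
{
	struct ex_port from = { 0, 1 }, cpu = { 0, 8 }, peer = { 0, 2 };
	const struct ex_port *to;

	if (!resolve_mirror_target(&from, &peer, &cpu, false, false, &to))
		printf("mirroring to port %d\n", to->port_index);
	return 0;
}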
static int
-dsa_slave_add_cls_matchall_police(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+dsa_user_add_cls_matchall_police(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
struct netlink_ext_ack *extack = cls->common.extack;
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_mall_policer_tc_entry *policer;
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
@@ -1359,8 +1477,7 @@ dsa_slave_add_cls_matchall_police(struct net_device *dev,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(&cls->rule->action,
- cls->common.extack))
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action, extack))
return -EOPNOTSUPP;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
@@ -1394,31 +1511,44 @@ dsa_slave_add_cls_matchall_police(struct net_device *dev,
return err;
}
-static int dsa_slave_add_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+static int dsa_user_add_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
- int err = -EOPNOTSUPP;
+ const struct flow_action *action = &cls->rule->action;
+ struct netlink_ext_ack *extack = cls->common.extack;
- if (cls->common.protocol == htons(ETH_P_ALL) &&
- flow_offload_has_one_action(&cls->rule->action) &&
- cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
- err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
- else if (flow_offload_has_one_action(&cls->rule->action) &&
- cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
- err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
+ if (!flow_offload_has_one_action(action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload matchall filter with more than one action");
+ return -EOPNOTSUPP;
+ }
- return err;
+ switch (action->entries[0].id) {
+ case FLOW_ACTION_MIRRED:
+ return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
+ false);
+ case FLOW_ACTION_MIRRED_INGRESS:
+ return dsa_user_add_cls_matchall_mirred(dev, cls, ingress,
+ true);
+ case FLOW_ACTION_POLICE:
+ return dsa_user_add_cls_matchall_police(dev, cls, ingress);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown action");
+ break;
+ }
+
+ return -EOPNOTSUPP;
}
-static void dsa_slave_del_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls)
+static void dsa_user_del_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
- mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
+ mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie);
if (!mall_tc_entry)
return;
@@ -1441,29 +1571,29 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
kfree(mall_tc_entry);
}
-static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+static int dsa_user_setup_tc_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
if (cls->common.chain_index)
return -EOPNOTSUPP;
switch (cls->command) {
case TC_CLSMATCHALL_REPLACE:
- return dsa_slave_add_cls_matchall(dev, cls, ingress);
+ return dsa_user_add_cls_matchall(dev, cls, ingress);
case TC_CLSMATCHALL_DESTROY:
- dsa_slave_del_cls_matchall(dev, cls);
+ dsa_user_del_cls_matchall(dev, cls);
return 0;
default:
return -EOPNOTSUPP;
}
}
-static int dsa_slave_add_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_add_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -1473,11 +1603,11 @@ static int dsa_slave_add_cls_flower(struct net_device *dev,
return ds->ops->cls_flower_add(ds, port, cls, ingress);
}
-static int dsa_slave_del_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_del_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -1487,11 +1617,11 @@ static int dsa_slave_del_cls_flower(struct net_device *dev,
return ds->ops->cls_flower_del(ds, port, cls, ingress);
}
-static int dsa_slave_stats_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_stats_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -1501,24 +1631,24 @@ static int dsa_slave_stats_cls_flower(struct net_device *dev,
return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}
-static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_setup_tc_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
switch (cls->command) {
case FLOW_CLS_REPLACE:
- return dsa_slave_add_cls_flower(dev, cls, ingress);
+ return dsa_user_add_cls_flower(dev, cls, ingress);
case FLOW_CLS_DESTROY:
- return dsa_slave_del_cls_flower(dev, cls, ingress);
+ return dsa_user_del_cls_flower(dev, cls, ingress);
case FLOW_CLS_STATS:
- return dsa_slave_stats_cls_flower(dev, cls, ingress);
+ return dsa_user_stats_cls_flower(dev, cls, ingress);
default:
return -EOPNOTSUPP;
}
}
-static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
{
struct net_device *dev = cb_priv;
@@ -1527,46 +1657,46 @@ static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSMATCHALL:
- return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
+ return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
+ return dsa_user_setup_tc_cls_flower(dev, type_data, ingress);
default:
return -EOPNOTSUPP;
}
}
-static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
+ return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true);
}
-static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
+ return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false);
}
-static LIST_HEAD(dsa_slave_block_cb_list);
+static LIST_HEAD(dsa_user_block_cb_list);
-static int dsa_slave_setup_tc_block(struct net_device *dev,
- struct flow_block_offload *f)
+static int dsa_user_setup_tc_block(struct net_device *dev,
+ struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = dsa_slave_setup_tc_block_cb_ig;
+ cb = dsa_user_setup_tc_block_cb_ig;
else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = dsa_slave_setup_tc_block_cb_eg;
+ cb = dsa_user_setup_tc_block_cb_eg;
else
return -EOPNOTSUPP;
- f->driver_block_list = &dsa_slave_block_cb_list;
+ f->driver_block_list = &dsa_user_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
+ if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
@@ -1574,7 +1704,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
return PTR_ERR(block_cb);
flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
+ list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
@@ -1589,28 +1719,28 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
}
}
-static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
- void *type_data)
+static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port,
+ void *type_data)
{
- struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
+ struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port));
- if (!master->netdev_ops->ndo_setup_tc)
+ if (!conduit->netdev_ops->ndo_setup_tc)
return -EOPNOTSUPP;
- return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
+ return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data);
}
-static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
switch (type) {
case TC_SETUP_BLOCK:
- return dsa_slave_setup_tc_block(dev, type_data);
+ return dsa_user_setup_tc_block(dev, type_data);
case TC_SETUP_FT:
- return dsa_slave_setup_ft_block(ds, dp->index, type_data);
+ return dsa_user_setup_ft_block(ds, dp->index, type_data);
default:
break;
}
@@ -1621,10 +1751,10 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}
-static int dsa_slave_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *nfc, u32 *rule_locs)
+static int dsa_user_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->get_rxnfc)
@@ -1633,10 +1763,10 @@ static int dsa_slave_get_rxnfc(struct net_device *dev,
return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}
-static int dsa_slave_set_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dsa_user_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->set_rxnfc)
@@ -1645,10 +1775,10 @@ static int dsa_slave_set_rxnfc(struct net_device *dev,
return ds->ops->set_rxnfc(ds, dp->index, nfc);
}
-static int dsa_slave_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts)
+static int dsa_user_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *ts)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
if (!ds->ops->get_ts_info)
@@ -1657,10 +1787,10 @@ static int dsa_slave_get_ts_info(struct net_device *dev,
return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
-static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
- u16 vid)
+static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.vid = vid,
@@ -1668,6 +1798,9 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
.flags = 0,
};
struct netlink_ext_ack extack = {0};
+ struct dsa_switch *ds = dp->ds;
+ struct netdev_hw_addr *ha;
+ struct dsa_vlan *v;
int ret;
/* User port... */
@@ -1687,39 +1820,118 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
return ret;
}
+ if (!dsa_switch_supports_uc_filtering(ds) &&
+ !dsa_switch_supports_mc_filtering(ds))
+ return 0;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ ret = -ENOMEM;
+ goto rollback;
+ }
+
+ netif_addr_lock_bh(dev);
+
+ v->vid = vid;
+ list_add_tail(&v->list, &dp->user_vlans);
+
+ if (dsa_switch_supports_mc_filtering(ds)) {
+ netdev_for_each_synced_mc_addr(ha, dev) {
+ dsa_user_schedule_standalone_work(dev, DSA_MC_ADD,
+ ha->addr, vid);
+ }
+ }
+
+ if (dsa_switch_supports_uc_filtering(ds)) {
+ netdev_for_each_synced_uc_addr(ha, dev) {
+ dsa_user_schedule_standalone_work(dev, DSA_UC_ADD,
+ ha->addr, vid);
+ }
+ }
+
+ netif_addr_unlock_bh(dev);
+
+ dsa_flush_workqueue();
+
return 0;
+
+rollback:
+ dsa_port_host_vlan_del(dp, &vlan);
+ dsa_port_vlan_del(dp, &vlan);
+
+ return ret;
}
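The new rollback label above ensures that if the bookkeeping allocation fails after the VLAN has already been programmed, both the host VLAN and the port VLAN are removed again before returning. The standalone C sketch below (all helpers are stand-ins, not the DSA API) shows the same unwinding structure, with an artificial flag to exercise the failure path:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int port_vlan_add(int vid)	{ printf("port  +%d\n", vid); return 0; }
static int host_vlan_add(int vid)	{ printf("host  +%d\n", vid); return 0; }
static void port_vlan_del(int vid)	{ printf("port  -%d\n", vid); }
static void host_vlan_del(int vid)	{ printf("host  -%d\n", vid); }

static int rx_add_vid(int vid, int simulate_oom)
{
	int *tracker;
	int err;

	err = port_vlan_add(vid);
	if (err)
		return err;

	err = host_vlan_add(vid);
	if (err) {
		port_vlan_del(vid);
		return err;
	}

	/* Bookkeeping entry used later to replay standalone host addresses. */
	tracker = simulate_oom ? NULL : malloc(sizeof(*tracker));
	if (!tracker) {
		err = -ENOMEM;
		goto rollback;
	}
	*tracker = vid;
	free(tracker);	/* a real implementation keeps it on a per-port list */
	return 0;

rollback:
	host_vlan_del(vid);
	port_vlan_del(vid);
	return err;
}

int main(void)
{
	rx_add_vid(100, 0);	/* success path */
	rx_add_vid(101, 1);	/* failure path exercises the rollback */
	return 0;
}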
-static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
- u16 vid)
+static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.vid = vid,
/* This API only allows programming tagged, non-PVID VIDs */
.flags = 0,
};
+ struct dsa_switch *ds = dp->ds;
+ struct netdev_hw_addr *ha;
+ struct dsa_vlan *v;
int err;
err = dsa_port_vlan_del(dp, &vlan);
if (err)
return err;
- return dsa_port_host_vlan_del(dp, &vlan);
+ err = dsa_port_host_vlan_del(dp, &vlan);
+ if (err)
+ return err;
+
+ if (!dsa_switch_supports_uc_filtering(ds) &&
+ !dsa_switch_supports_mc_filtering(ds))
+ return 0;
+
+ netif_addr_lock_bh(dev);
+
+ v = dsa_vlan_find(&dp->user_vlans, &vlan);
+ if (!v) {
+ netif_addr_unlock_bh(dev);
+ return -ENOENT;
+ }
+
+ list_del(&v->list);
+ kfree(v);
+
+ if (dsa_switch_supports_mc_filtering(ds)) {
+ netdev_for_each_synced_mc_addr(ha, dev) {
+ dsa_user_schedule_standalone_work(dev, DSA_MC_DEL,
+ ha->addr, vid);
+ }
+ }
+
+ if (dsa_switch_supports_uc_filtering(ds)) {
+ netdev_for_each_synced_uc_addr(ha, dev) {
+ dsa_user_schedule_standalone_work(dev, DSA_UC_DEL,
+ ha->addr, vid);
+ }
+ }
+
+ netif_addr_unlock_bh(dev);
+
+ dsa_flush_workqueue();
+
+ return 0;
}
-static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
+static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
- return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
+ return dsa_user_vlan_rx_add_vid(arg, proto, vid);
}
-static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
+static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
- return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
+ return dsa_user_vlan_rx_kill_vid(arg, proto, vid);
}
/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
@@ -1753,26 +1965,26 @@ static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
* - the bridge VLANs
* - the 8021q upper VLANs
*/
-int dsa_slave_manage_vlan_filtering(struct net_device *slave,
- bool vlan_filtering)
+int dsa_user_manage_vlan_filtering(struct net_device *user,
+ bool vlan_filtering)
{
int err;
if (vlan_filtering) {
- slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
+ err = vlan_for_each(user, dsa_user_restore_vlan, user);
if (err) {
- vlan_for_each(slave, dsa_slave_clear_vlan, slave);
- slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ vlan_for_each(user, dsa_user_clear_vlan, user);
+ user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
return err;
}
} else {
- err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
+ err = vlan_for_each(user, dsa_user_clear_vlan, user);
if (err)
return err;
- slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
return 0;
@@ -1843,7 +2055,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
list_for_each_entry(dst, &dsa_tree_list, list) {
list_for_each_entry(other_dp, &dst->ports, list) {
struct dsa_hw_port *hw_port;
- struct net_device *slave;
+ struct net_device *user;
if (other_dp->type != DSA_PORT_TYPE_USER)
continue;
@@ -1854,17 +2066,17 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
if (!other_dp->ds->mtu_enforcement_ingress)
continue;
- slave = other_dp->slave;
+ user = other_dp->user;
- if (min_mtu > slave->mtu)
- min_mtu = slave->mtu;
+ if (min_mtu > user->mtu)
+ min_mtu = user->mtu;
hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
if (!hw_port)
goto out;
- hw_port->dev = slave;
- hw_port->old_mtu = slave->mtu;
+ hw_port->dev = user;
+ hw_port->old_mtu = user->mtu;
list_add(&hw_port->list, &hw_port_list);
}
@@ -1874,7 +2086,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
* interface's MTU first, regardless of whether the intention of the
* user was to raise or lower it.
*/
- err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
+ err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
if (!err)
goto out;
@@ -1888,17 +2100,18 @@ out:
dsa_hw_port_list_free(&hw_port_list);
}
-int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_port *cpu_dp = dp->cpu_dp;
struct dsa_switch *ds = dp->ds;
struct dsa_port *other_dp;
int largest_mtu = 0;
- int new_master_mtu;
- int old_master_mtu;
+ int new_conduit_mtu;
+ int old_conduit_mtu;
int mtu_limit;
+ int overhead;
int cpu_mtu;
int err;
@@ -1906,43 +2119,44 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
return -EOPNOTSUPP;
dsa_tree_for_each_user_port(other_dp, ds->dst) {
- int slave_mtu;
+ int user_mtu;
- /* During probe, this function will be called for each slave
+ /* During probe, this function will be called for each user
* device, while not all of them have been allocated. That's
* ok, it doesn't change what the maximum is, so ignore it.
*/
- if (!other_dp->slave)
+ if (!other_dp->user)
continue;
/* Pretend that we already applied the setting, which we
* actually haven't (still haven't done all integrity checks)
*/
if (dp == other_dp)
- slave_mtu = new_mtu;
+ user_mtu = new_mtu;
else
- slave_mtu = other_dp->slave->mtu;
+ user_mtu = other_dp->user->mtu;
- if (largest_mtu < slave_mtu)
- largest_mtu = slave_mtu;
+ if (largest_mtu < user_mtu)
+ largest_mtu = user_mtu;
}
- mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
- old_master_mtu = master->mtu;
- new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
- if (new_master_mtu > mtu_limit)
+ overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+ mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
+ old_conduit_mtu = conduit->mtu;
+ new_conduit_mtu = largest_mtu + overhead;
+ if (new_conduit_mtu > mtu_limit)
return -ERANGE;
- /* If the master MTU isn't over limit, there's no need to check the CPU
+ /* If the conduit MTU isn't over limit, there's no need to check the CPU
* MTU, since that surely isn't either.
*/
cpu_mtu = largest_mtu;
/* Start applying stuff */
- if (new_master_mtu != old_master_mtu) {
- err = dev_set_mtu(master, new_master_mtu);
+ if (new_conduit_mtu != old_conduit_mtu) {
+ err = dev_set_mtu(conduit, new_conduit_mtu);
if (err < 0)
- goto out_master_failed;
+ goto out_conduit_failed;
/* We only need to propagate the MTU of the CPU port to
* upstream switches, so emit a notifier which updates them.
@@ -1956,27 +2170,52 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
if (err)
goto out_port_failed;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
dsa_bridge_mtu_normalization(dp);
return 0;
out_port_failed:
- if (new_master_mtu != old_master_mtu)
- dsa_port_mtu_change(cpu_dp, old_master_mtu -
- dsa_tag_protocol_overhead(cpu_dp->tag_ops));
+ if (new_conduit_mtu != old_conduit_mtu)
+ dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead);
out_cpu_failed:
- if (new_master_mtu != old_master_mtu)
- dev_set_mtu(master, old_master_mtu);
-out_master_failed:
+ if (new_conduit_mtu != old_conduit_mtu)
+ dev_set_mtu(conduit, old_conduit_mtu);
+out_conduit_failed:
return err;
}
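The MTU hunks above compute the tagging overhead once, size the conduit as the largest user-port MTU plus that overhead, and raise the upper bound by the overhead on the user side as well. A small self-contained illustration of the arithmetic (values made up for the example):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int conduit_max_mtu = 9000;	/* conduit->max_mtu */
	int user_max_mtu    = 9000;	/* dev->max_mtu */
	int overhead        = 8;	/* dsa_tag_protocol_overhead() */
	int largest_mtu     = 1500;	/* largest MTU across user ports */

	int mtu_limit       = min_int(conduit_max_mtu, user_max_mtu + overhead);
	int new_conduit_mtu = largest_mtu + overhead;

	if (new_conduit_mtu > mtu_limit) {
		puts("ERANGE: requested MTU cannot be carried by the conduit");
		return 1;
	}

	printf("conduit MTU %d carries user MTU %d plus %d bytes of tag\n",
	       new_conduit_mtu, largest_mtu, overhead);
	return 0;
}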
static int __maybe_unused
-dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_set_apptrust(struct net_device *dev, u8 *sel, int nsel)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->port_set_apptrust)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_set_apptrust(ds, port, sel, nsel);
+}
+
+static int __maybe_unused
+dsa_user_dcbnl_get_apptrust(struct net_device *dev, u8 *sel, int *nsel)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->port_get_apptrust)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_get_apptrust(ds, port, sel, nsel);
+}
+
+static int __maybe_unused
+dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
@@ -2000,10 +2239,62 @@ dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
return 0;
}
+/* Update the DSCP prio entries on all user ports of the switch in case
+ * the switch supports global DSCP prio instead of per port DSCP prios.
+ */
+static int dsa_user_dcbnl_ieee_global_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app, bool del)
+{
+ int (*setdel)(struct net_device *dev, struct dcb_app *app);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ struct dsa_port *other_dp;
+ int err, restore_err;
+
+ if (del)
+ setdel = dcb_ieee_delapp;
+ else
+ setdel = dcb_ieee_setapp;
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *user = other_dp->user;
+
+ if (!user || user == dev)
+ continue;
+
+ err = setdel(user, app);
+ if (err)
+ goto err_try_to_restore;
+ }
+
+ return 0;
+
+err_try_to_restore:
+
+ /* Revert logic to restore previous state of app entries */
+ if (!del)
+ setdel = dcb_ieee_delapp;
+ else
+ setdel = dcb_ieee_setapp;
+
+ dsa_switch_for_each_user_port_continue_reverse(other_dp, ds) {
+ struct net_device *user = other_dp->user;
+
+ if (!user || user == dev)
+ continue;
+
+ restore_err = setdel(user, app);
+ if (restore_err)
+ netdev_err(user, "Failed to restore DSCP prio entry configuration\n");
+ }
+
+ return err;
+}
+
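The global-DSCP helper above applies the app entry to every other user port and, if one of them fails, walks back over the ports already updated and reverts them in reverse order. A plain-C sketch of that propagate/revert pattern (the port array and fail_at knob are artificial; the unwinding order is the point):

#include <stdio.h>

#define NPORTS 4

static int apply(int port)	{ printf("setapp  port %d\n", port); return 0; }
static int revert(int port)	{ printf("delapp  port %d\n", port); return 0; }

static int propagate(int self, int fail_at)
{
	int i, err = 0;

	for (i = 0; i < NPORTS; i++) {
		if (i == self)
			continue;
		if (i == fail_at) {
			err = -1;
			goto err_try_to_restore;
		}
		apply(i);
	}
	return 0;

err_try_to_restore:
	/* Walk back over the ports that were already updated. */
	for (i--; i >= 0; i--) {
		if (i == self)
			continue;
		revert(i);
	}
	return err;
}

int main(void)
{
	return propagate(0, 2) ? 1 : 0;
}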
static int __maybe_unused
-dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
@@ -2031,32 +2322,43 @@ dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
return err;
}
+ if (!ds->dscp_prio_mapping_is_global)
+ return 0;
+
+ err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, false);
+ if (err) {
+ if (ds->ops->port_del_dscp_prio)
+ ds->ops->port_del_dscp_prio(ds, port, dscp, new_prio);
+ dcb_ieee_delapp(dev, app);
+ return err;
+ }
+
return 0;
}
-static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
- struct dcb_app *app)
+static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
{
switch (app->selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
switch (app->protocol) {
case 0:
- return dsa_slave_dcbnl_set_default_prio(dev, app);
+ return dsa_user_dcbnl_set_default_prio(dev, app);
default:
return -EOPNOTSUPP;
}
break;
case IEEE_8021QAZ_APP_SEL_DSCP:
- return dsa_slave_dcbnl_add_dscp_prio(dev, app);
+ return dsa_user_dcbnl_add_dscp_prio(dev, app);
default:
return -EOPNOTSUPP;
}
}
static int __maybe_unused
-dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
@@ -2081,9 +2383,9 @@ dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
}
static int __maybe_unused
-dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err, port = dp->index;
u8 dscp = app->protocol;
@@ -2101,23 +2403,35 @@ dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
return err;
}
+ if (!ds->dscp_prio_mapping_is_global)
+ return 0;
+
+ err = dsa_user_dcbnl_ieee_global_dscp_setdel(dev, app, true);
+ if (err) {
+ if (ds->ops->port_add_dscp_prio)
+ ds->ops->port_add_dscp_prio(ds, port, dscp,
+ app->priority);
+ dcb_ieee_setapp(dev, app);
+ return err;
+ }
+
return 0;
}
-static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
- struct dcb_app *app)
+static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
{
switch (app->selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
switch (app->protocol) {
case 0:
- return dsa_slave_dcbnl_del_default_prio(dev, app);
+ return dsa_user_dcbnl_del_default_prio(dev, app);
default:
return -EOPNOTSUPP;
}
break;
case IEEE_8021QAZ_APP_SEL_DSCP:
- return dsa_slave_dcbnl_del_dscp_prio(dev, app);
+ return dsa_user_dcbnl_del_dscp_prio(dev, app);
default:
return -EOPNOTSUPP;
}
@@ -2126,9 +2440,9 @@ static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
/* Pre-populate the DCB application priority table with the priorities
* configured during switch setup, which we read from hardware here.
*/
-static int dsa_slave_dcbnl_init(struct net_device *dev)
+static int dsa_user_dcbnl_init(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err;
@@ -2176,46 +2490,52 @@ static int dsa_slave_dcbnl_init(struct net_device *dev)
return 0;
}
-static const struct ethtool_ops dsa_slave_ethtool_ops = {
- .get_drvinfo = dsa_slave_get_drvinfo,
- .get_regs_len = dsa_slave_get_regs_len,
- .get_regs = dsa_slave_get_regs,
- .nway_reset = dsa_slave_nway_reset,
+static const struct ethtool_ops dsa_user_ethtool_ops = {
+ .get_drvinfo = dsa_user_get_drvinfo,
+ .get_regs_len = dsa_user_get_regs_len,
+ .get_regs = dsa_user_get_regs,
+ .nway_reset = dsa_user_nway_reset,
.get_link = ethtool_op_get_link,
- .get_eeprom_len = dsa_slave_get_eeprom_len,
- .get_eeprom = dsa_slave_get_eeprom,
- .set_eeprom = dsa_slave_set_eeprom,
- .get_strings = dsa_slave_get_strings,
- .get_ethtool_stats = dsa_slave_get_ethtool_stats,
- .get_sset_count = dsa_slave_get_sset_count,
- .get_eth_phy_stats = dsa_slave_get_eth_phy_stats,
- .get_eth_mac_stats = dsa_slave_get_eth_mac_stats,
- .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats,
- .get_rmon_stats = dsa_slave_get_rmon_stats,
- .set_wol = dsa_slave_set_wol,
- .get_wol = dsa_slave_get_wol,
- .set_eee = dsa_slave_set_eee,
- .get_eee = dsa_slave_get_eee,
- .get_link_ksettings = dsa_slave_get_link_ksettings,
- .set_link_ksettings = dsa_slave_set_link_ksettings,
- .get_pause_stats = dsa_slave_get_pause_stats,
- .get_pauseparam = dsa_slave_get_pauseparam,
- .set_pauseparam = dsa_slave_set_pauseparam,
- .get_rxnfc = dsa_slave_get_rxnfc,
- .set_rxnfc = dsa_slave_set_rxnfc,
- .get_ts_info = dsa_slave_get_ts_info,
- .self_test = dsa_slave_net_selftest,
+ .get_eeprom_len = dsa_user_get_eeprom_len,
+ .get_eeprom = dsa_user_get_eeprom,
+ .set_eeprom = dsa_user_set_eeprom,
+ .get_strings = dsa_user_get_strings,
+ .get_ethtool_stats = dsa_user_get_ethtool_stats,
+ .get_sset_count = dsa_user_get_sset_count,
+ .get_eth_phy_stats = dsa_user_get_eth_phy_stats,
+ .get_eth_mac_stats = dsa_user_get_eth_mac_stats,
+ .get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
+ .get_rmon_stats = dsa_user_get_rmon_stats,
+ .get_ts_stats = dsa_user_get_ts_stats,
+ .set_wol = dsa_user_set_wol,
+ .get_wol = dsa_user_get_wol,
+ .set_eee = dsa_user_set_eee,
+ .get_eee = dsa_user_get_eee,
+ .get_link_ksettings = dsa_user_get_link_ksettings,
+ .set_link_ksettings = dsa_user_set_link_ksettings,
+ .get_pause_stats = dsa_user_get_pause_stats,
+ .get_pauseparam = dsa_user_get_pauseparam,
+ .set_pauseparam = dsa_user_set_pauseparam,
+ .get_rxnfc = dsa_user_get_rxnfc,
+ .set_rxnfc = dsa_user_set_rxnfc,
+ .get_ts_info = dsa_user_get_ts_info,
+ .self_test = dsa_user_net_selftest,
+ .get_mm = dsa_user_get_mm,
+ .set_mm = dsa_user_set_mm,
+ .get_mm_stats = dsa_user_get_mm_stats,
};
-static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
- .ieee_setapp = dsa_slave_dcbnl_ieee_setapp,
- .ieee_delapp = dsa_slave_dcbnl_ieee_delapp,
+static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
+ .ieee_setapp = dsa_user_dcbnl_ieee_setapp,
+ .ieee_delapp = dsa_user_dcbnl_ieee_delapp,
+ .dcbnl_setapptrust = dsa_user_dcbnl_set_apptrust,
+ .dcbnl_getapptrust = dsa_user_dcbnl_get_apptrust,
};
-static void dsa_slave_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *s)
+static void dsa_user_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_stats64)
@@ -2224,46 +2544,73 @@ static void dsa_slave_get_stats64(struct net_device *dev,
dev_get_tstats64(dev, s);
}
-static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
- struct net_device_path *path)
+static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
{
- struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
- struct net_device *master = dsa_port_to_master(dp);
+ struct dsa_port *dp = dsa_user_to_port(ctx->dev);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_port *cpu_dp = dp->cpu_dp;
path->dev = ctx->dev;
path->type = DEV_PATH_DSA;
path->dsa.proto = cpu_dp->tag_ops->proto;
path->dsa.port = dp->index;
- ctx->dev = master;
+ ctx->dev = conduit;
return 0;
}
-static const struct net_device_ops dsa_slave_netdev_ops = {
- .ndo_open = dsa_slave_open,
- .ndo_stop = dsa_slave_close,
- .ndo_start_xmit = dsa_slave_xmit,
- .ndo_change_rx_flags = dsa_slave_change_rx_flags,
- .ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_fdb_dump = dsa_slave_fdb_dump,
- .ndo_eth_ioctl = dsa_slave_ioctl,
- .ndo_get_iflink = dsa_slave_get_iflink,
+static int dsa_user_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_hwtstamp_get)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_hwtstamp_get(ds, dp->index, cfg);
+}
+
+static int dsa_user_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_hwtstamp_set)
+ return -EOPNOTSUPP;
+
+ return ds->ops->port_hwtstamp_set(ds, dp->index, cfg, extack);
+}
+
+static const struct net_device_ops dsa_user_netdev_ops = {
+ .ndo_open = dsa_user_open,
+ .ndo_stop = dsa_user_close,
+ .ndo_start_xmit = dsa_user_xmit,
+ .ndo_change_rx_flags = dsa_user_change_rx_flags,
+ .ndo_set_rx_mode = dsa_user_set_rx_mode,
+ .ndo_set_mac_address = dsa_user_set_mac_address,
+ .ndo_fdb_dump = dsa_user_fdb_dump,
+ .ndo_eth_ioctl = dsa_user_ioctl,
+ .ndo_get_iflink = dsa_user_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_netpoll_setup = dsa_slave_netpoll_setup,
- .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
- .ndo_poll_controller = dsa_slave_poll_controller,
+ .ndo_netpoll_setup = dsa_user_netpoll_setup,
+ .ndo_netpoll_cleanup = dsa_user_netpoll_cleanup,
+ .ndo_poll_controller = dsa_user_poll_controller,
#endif
- .ndo_setup_tc = dsa_slave_setup_tc,
- .ndo_get_stats64 = dsa_slave_get_stats64,
- .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
- .ndo_change_mtu = dsa_slave_change_mtu,
- .ndo_fill_forward_path = dsa_slave_fill_forward_path,
+ .ndo_setup_tc = dsa_user_setup_tc,
+ .ndo_get_stats64 = dsa_user_get_stats64,
+ .ndo_vlan_rx_add_vid = dsa_user_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid,
+ .ndo_change_mtu = dsa_user_change_mtu,
+ .ndo_fill_forward_path = dsa_user_fill_forward_path,
+ .ndo_hwtstamp_get = dsa_user_hwtstamp_get,
+ .ndo_hwtstamp_set = dsa_user_hwtstamp_set,
};
-static struct device_type dsa_type = {
+static const struct device_type dsa_type = {
.name = "dsa",
};
@@ -2276,10 +2623,10 @@ void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
-static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void dsa_user_phylink_fixed_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
- struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
struct dsa_switch *ds = dp->ds;
/* No need to check that this operation is valid, the callback would
@@ -2288,33 +2635,33 @@ static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
ds->ops->phylink_fixed_state(ds, dp->index, state);
}
-/* slave device setup *******************************************************/
-static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
- u32 flags)
+/* user device setup *******************************************************/
+static int dsa_user_phy_connect(struct net_device *user_dev, int addr,
+ u32 flags)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
struct dsa_switch *ds = dp->ds;
- slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
- if (!slave_dev->phydev) {
- netdev_err(slave_dev, "no phy at %d\n", addr);
+ user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr);
+ if (!user_dev->phydev) {
+ netdev_err(user_dev, "no phy at %d\n", addr);
return -ENODEV;
}
- slave_dev->phydev->dev_flags |= flags;
+ user_dev->phydev->dev_flags |= flags;
- return phylink_connect_phy(dp->pl, slave_dev->phydev);
+ return phylink_connect_phy(dp->pl, user_dev->phydev);
}
-static int dsa_slave_phy_setup(struct net_device *slave_dev)
+static int dsa_user_phy_setup(struct net_device *user_dev)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
u32 phy_flags = 0;
int ret;
- dp->pl_config.dev = &slave_dev->dev;
+ dp->pl_config.dev = &user_dev->dev;
dp->pl_config.type = PHYLINK_NETDEV;
/* The get_fixed_state callback takes precedence over polling the
@@ -2322,7 +2669,7 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
* this if the switch provides such a callback.
*/
if (ds->ops->phylink_fixed_state) {
- dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
+ dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state;
dp->pl_config.poll_fixed_state = true;
}
@@ -2334,14 +2681,14 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
phy_flags = ds->ops->get_phy_flags(ds, dp->index);
ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
- if (ret == -ENODEV && ds->slave_mii_bus) {
+ if (ret == -ENODEV && ds->user_mii_bus) {
/* We could not connect to a designated PHY or SFP, so try to
* use the switch internal MDIO bus instead
*/
- ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
+ ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags);
}
if (ret) {
- netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+ netdev_err(user_dev, "failed to connect to PHY: %pe\n",
ERR_PTR(ret));
dsa_port_phylink_destroy(dp);
}
@@ -2349,42 +2696,43 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
return ret;
}
-void dsa_slave_setup_tagger(struct net_device *slave)
+void dsa_user_setup_tagger(struct net_device *user)
{
- struct dsa_port *dp = dsa_slave_to_port(slave);
- struct net_device *master = dsa_port_to_master(dp);
- struct dsa_slave_priv *p = netdev_priv(slave);
+ struct dsa_port *dp = dsa_user_to_port(user);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
+ struct dsa_user_priv *p = netdev_priv(user);
const struct dsa_port *cpu_dp = dp->cpu_dp;
const struct dsa_switch *ds = dp->ds;
- slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
- slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
- /* Try to save one extra realloc later in the TX path (in the master)
- * by also inheriting the master's needed headroom and tailroom.
+ user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
+ user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
+ /* Try to save one extra realloc later in the TX path (in the conduit)
+ * by also inheriting the conduit's needed headroom and tailroom.
* The 8021q driver also does this.
*/
- slave->needed_headroom += master->needed_headroom;
- slave->needed_tailroom += master->needed_tailroom;
+ user->needed_headroom += conduit->needed_headroom;
+ user->needed_tailroom += conduit->needed_tailroom;
p->xmit = cpu_dp->tag_ops->xmit;
- slave->features = master->vlan_features | NETIF_F_HW_TC;
- slave->hw_features |= NETIF_F_HW_TC;
- slave->features |= NETIF_F_LLTX;
- if (slave->needed_tailroom)
- slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
+ user->features = conduit->vlan_features | NETIF_F_HW_TC;
+ user->hw_features |= NETIF_F_HW_TC;
+ if (user->needed_tailroom)
+ user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
if (ds->needs_standalone_vlan_filtering)
- slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ user->lltx = true;
}
-int dsa_slave_suspend(struct net_device *slave_dev)
+int dsa_user_suspend(struct net_device *user_dev)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
- if (!netif_running(slave_dev))
+ if (!netif_running(user_dev))
return 0;
- netif_device_detach(slave_dev);
+ netif_device_detach(user_dev);
rtnl_lock();
phylink_stop(dp->pl);
@@ -2393,14 +2741,14 @@ int dsa_slave_suspend(struct net_device *slave_dev)
return 0;
}
-int dsa_slave_resume(struct net_device *slave_dev)
+int dsa_user_resume(struct net_device *user_dev)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
- if (!netif_running(slave_dev))
+ if (!netif_running(user_dev))
return 0;
- netif_device_attach(slave_dev);
+ netif_device_attach(user_dev);
rtnl_lock();
phylink_start(dp->pl);
@@ -2409,12 +2757,12 @@ int dsa_slave_resume(struct net_device *slave_dev)
return 0;
}
-int dsa_slave_create(struct dsa_port *port)
+int dsa_user_create(struct dsa_port *port)
{
- struct net_device *master = dsa_port_to_master(port);
+ struct net_device *conduit = dsa_port_to_conduit(port);
struct dsa_switch *ds = port->ds;
- struct net_device *slave_dev;
- struct dsa_slave_priv *p;
+ struct net_device *user_dev;
+ struct dsa_user_priv *p;
const char *name;
int assign_type;
int ret;
@@ -2430,55 +2778,51 @@ int dsa_slave_create(struct dsa_port *port)
assign_type = NET_NAME_ENUM;
}
- slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
- assign_type, ether_setup,
- ds->num_tx_queues, 1);
- if (slave_dev == NULL)
+ user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name,
+ assign_type, ether_setup,
+ ds->num_tx_queues, 1);
+ if (user_dev == NULL)
return -ENOMEM;
- slave_dev->rtnl_link_ops = &dsa_link_ops;
- slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+ user_dev->rtnl_link_ops = &dsa_link_ops;
+ user_dev->ethtool_ops = &dsa_user_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
- slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
+ user_dev->dcbnl_ops = &dsa_user_dcbnl_ops;
#endif
if (!is_zero_ether_addr(port->mac))
- eth_hw_addr_set(slave_dev, port->mac);
+ eth_hw_addr_set(user_dev, port->mac);
else
- eth_hw_addr_inherit(slave_dev, master);
- slave_dev->priv_flags |= IFF_NO_QUEUE;
+ eth_hw_addr_inherit(user_dev, conduit);
+ user_dev->priv_flags |= IFF_NO_QUEUE;
if (dsa_switch_supports_uc_filtering(ds))
- slave_dev->priv_flags |= IFF_UNICAST_FLT;
- slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+ user_dev->priv_flags |= IFF_UNICAST_FLT;
+ user_dev->netdev_ops = &dsa_user_netdev_ops;
if (ds->ops->port_max_mtu)
- slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
- SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
-
- SET_NETDEV_DEV(slave_dev, port->ds->dev);
- SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
- slave_dev->dev.of_node = port->dn;
- slave_dev->vlan_features = master->vlan_features;
-
- p = netdev_priv(slave_dev);
- slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!slave_dev->tstats) {
- free_netdev(slave_dev);
- return -ENOMEM;
- }
+ user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+ SET_NETDEV_DEVTYPE(user_dev, &dsa_type);
+
+ SET_NETDEV_DEV(user_dev, port->ds->dev);
+ SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port);
+ user_dev->dev.of_node = port->dn;
+ user_dev->vlan_features = conduit->vlan_features;
+
+ p = netdev_priv(user_dev);
+ user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
- ret = gro_cells_init(&p->gcells, slave_dev);
+ ret = gro_cells_init(&p->gcells, user_dev);
if (ret)
goto out_free;
p->dp = port;
INIT_LIST_HEAD(&p->mall_tc_list);
- port->slave = slave_dev;
- dsa_slave_setup_tagger(slave_dev);
+ port->user = user_dev;
+ dsa_user_setup_tagger(user_dev);
- netif_carrier_off(slave_dev);
+ netif_carrier_off(user_dev);
- ret = dsa_slave_phy_setup(slave_dev);
+ ret = dsa_user_phy_setup(user_dev);
if (ret) {
- netdev_err(slave_dev,
+ netdev_err(user_dev,
"error %d setting up PHY for tree %d, switch %d, port %d\n",
ret, ds->dst->index, ds->index, port->index);
goto out_gcells;
@@ -2486,23 +2830,23 @@ int dsa_slave_create(struct dsa_port *port)
rtnl_lock();
- ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+ ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN);
if (ret && ret != -EOPNOTSUPP)
dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
ret, ETH_DATA_LEN, port->index);
- ret = register_netdevice(slave_dev);
+ ret = register_netdevice(user_dev);
if (ret) {
- netdev_err(master, "error %d registering interface %s\n",
- ret, slave_dev->name);
+ netdev_err(conduit, "error %d registering interface %s\n",
+ ret, user_dev->name);
rtnl_unlock();
goto out_phy;
}
if (IS_ENABLED(CONFIG_DCB)) {
- ret = dsa_slave_dcbnl_init(slave_dev);
+ ret = dsa_user_dcbnl_init(user_dev);
if (ret) {
- netdev_err(slave_dev,
+ netdev_err(user_dev,
"failed to initialize DCB: %pe\n",
ERR_PTR(ret));
rtnl_unlock();
@@ -2510,7 +2854,7 @@ int dsa_slave_create(struct dsa_port *port)
}
}
- ret = netdev_upper_dev_link(master, slave_dev, NULL);
+ ret = netdev_upper_dev_link(conduit, user_dev, NULL);
rtnl_unlock();
@@ -2520,7 +2864,7 @@ int dsa_slave_create(struct dsa_port *port)
return 0;
out_unregister:
- unregister_netdev(slave_dev);
+ unregister_netdev(user_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
@@ -2529,124 +2873,117 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(slave_dev->tstats);
- free_netdev(slave_dev);
- port->slave = NULL;
+ free_netdev(user_dev);
+ port->user = NULL;
return ret;
}
-void dsa_slave_destroy(struct net_device *slave_dev)
+void dsa_user_destroy(struct net_device *user_dev)
{
- struct net_device *master = dsa_slave_to_master(slave_dev);
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
- struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ struct net_device *conduit = dsa_user_to_conduit(user_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
+ struct dsa_user_priv *p = netdev_priv(user_dev);
- netif_carrier_off(slave_dev);
+ netif_carrier_off(user_dev);
rtnl_lock();
- netdev_upper_dev_unlink(master, slave_dev);
- unregister_netdevice(slave_dev);
+ netdev_upper_dev_unlink(conduit, user_dev);
+ unregister_netdevice(user_dev);
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
- free_percpu(slave_dev->tstats);
- free_netdev(slave_dev);
+ free_netdev(user_dev);
}
-int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
+int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
struct netlink_ext_ack *extack)
{
- struct net_device *old_master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *old_conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct net_device *upper;
struct list_head *iter;
int err;
- if (master == old_master)
+ if (conduit == old_conduit)
return 0;
- if (!ds->ops->port_change_master) {
+ if (!ds->ops->port_change_conduit) {
NL_SET_ERR_MSG_MOD(extack,
- "Driver does not support changing DSA master");
+ "Driver does not support changing DSA conduit");
return -EOPNOTSUPP;
}
- if (!netdev_uses_dsa(master)) {
+ if (!netdev_uses_dsa(conduit)) {
NL_SET_ERR_MSG_MOD(extack,
- "Interface not eligible as DSA master");
+ "Interface not eligible as DSA conduit");
return -EOPNOTSUPP;
}
- netdev_for_each_upper_dev_rcu(master, upper, iter) {
- if (dsa_slave_dev_check(upper))
+ netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
+ if (dsa_user_dev_check(upper))
continue;
if (netif_is_bridge_master(upper))
continue;
- NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers");
return -EOPNOTSUPP;
}
- /* Since we allow live-changing the DSA master, plus we auto-open the
- * DSA master when the user port opens => we need to ensure that the
- * new DSA master is open too.
+ /* Since we allow live-changing the DSA conduit, plus we auto-open the
+ * DSA conduit when the user port opens => we need to ensure that the
+ * new DSA conduit is open too.
*/
if (dev->flags & IFF_UP) {
- err = dev_open(master, extack);
+ err = dev_open(conduit, extack);
if (err)
return err;
}
- netdev_upper_dev_unlink(old_master, dev);
+ netdev_upper_dev_unlink(old_conduit, dev);
- err = netdev_upper_dev_link(master, dev, extack);
+ err = netdev_upper_dev_link(conduit, dev, extack);
if (err)
- goto out_revert_old_master_unlink;
+ goto out_revert_old_conduit_unlink;
- err = dsa_port_change_master(dp, master, extack);
+ err = dsa_port_change_conduit(dp, conduit, extack);
if (err)
- goto out_revert_master_link;
+ goto out_revert_conduit_link;
/* Update the MTU of the new CPU port through cross-chip notifiers */
- err = dsa_slave_change_mtu(dev, dev->mtu);
+ err = dsa_user_change_mtu(dev, dev->mtu);
if (err && err != -EOPNOTSUPP) {
netdev_warn(dev,
- "nonfatal error updating MTU with new master: %pe\n",
+ "nonfatal error updating MTU with new conduit: %pe\n",
ERR_PTR(err));
}
- /* If the port doesn't have its own MAC address and relies on the DSA
- * master's one, inherit it again from the new DSA master.
- */
- if (is_zero_ether_addr(dp->mac))
- eth_hw_addr_inherit(dev, master);
-
return 0;
-out_revert_master_link:
- netdev_upper_dev_unlink(master, dev);
-out_revert_old_master_unlink:
- netdev_upper_dev_link(old_master, dev, NULL);
+out_revert_conduit_link:
+ netdev_upper_dev_unlink(conduit, dev);
+out_revert_old_conduit_unlink:
+ netdev_upper_dev_link(old_conduit, dev, NULL);
return err;
}
-bool dsa_slave_dev_check(const struct net_device *dev)
+bool dsa_user_dev_check(const struct net_device *dev)
{
- return dev->netdev_ops == &dsa_slave_netdev_ops;
+ return dev->netdev_ops == &dsa_user_netdev_ops;
}
-EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
+EXPORT_SYMBOL_GPL(dsa_user_dev_check);
-static int dsa_slave_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int dsa_user_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
+ struct dsa_port *dp;
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return err;
+ dp = dsa_user_to_port(dev);
extack = netdev_notifier_info_to_extack(&info->info);
if (netif_is_bridge_master(info->upper_dev)) {
@@ -2655,9 +2992,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
if (!err)
dsa_bridge_mtu_normalization(dp);
if (err == -EOPNOTSUPP) {
- if (extack && !extack->_msg)
- NL_SET_ERR_MSG_MOD(extack,
- "Offloading not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
@@ -2670,8 +3006,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
err = dsa_port_lag_join(dp, info->upper_dev,
info->upper_info, extack);
if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Offloading not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
@@ -2681,10 +3017,10 @@ static int dsa_slave_changeupper(struct net_device *dev,
}
} else if (is_hsr_master(info->upper_dev)) {
if (info->linking) {
- err = dsa_port_hsr_join(dp, info->upper_dev);
+ err = dsa_port_hsr_join(dp, info->upper_dev, extack);
if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Offloading not supported");
+ NL_SET_ERR_MSG_WEAK_MOD(extack,
+ "Offloading not supported");
err = 0;
}
err = notifier_from_errno(err);
@@ -2697,28 +3033,30 @@ static int dsa_slave_changeupper(struct net_device *dev,
return err;
}
-static int dsa_slave_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int dsa_user_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp;
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return NOTIFY_DONE;
+ dp = dsa_user_to_port(dev);
+
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
dsa_port_pre_bridge_leave(dp, info->upper_dev);
else if (netif_is_lag_master(info->upper_dev) && !info->linking)
dsa_port_pre_lag_leave(dp, info->upper_dev);
- /* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
- * meaningfully enslaved to a bridge yet
+ /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices cannot
+ * meaningfully placed under a bridge yet
*/
return NOTIFY_DONE;
}
static int
-dsa_slave_lag_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_lag_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
@@ -2729,15 +3067,15 @@ dsa_slave_lag_changeupper(struct net_device *dev,
return err;
netdev_for_each_lower_dev(dev, lower, iter) {
- if (!dsa_slave_dev_check(lower))
+ if (!dsa_user_dev_check(lower))
continue;
- dp = dsa_slave_to_port(lower);
+ dp = dsa_user_to_port(lower);
if (!dp->lag)
/* Software LAG */
continue;
- err = dsa_slave_changeupper(lower, info);
+ err = dsa_user_changeupper(lower, info);
if (notifier_to_errno(err))
break;
}
@@ -2745,12 +3083,12 @@ dsa_slave_lag_changeupper(struct net_device *dev,
return err;
}
-/* Same as dsa_slave_lag_changeupper() except that it calls
- * dsa_slave_prechangeupper()
+/* Same as dsa_user_lag_changeupper() except that it calls
+ * dsa_user_prechangeupper()
*/
static int
-dsa_slave_lag_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_lag_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
@@ -2761,15 +3099,15 @@ dsa_slave_lag_prechangeupper(struct net_device *dev,
return err;
netdev_for_each_lower_dev(dev, lower, iter) {
- if (!dsa_slave_dev_check(lower))
+ if (!dsa_user_dev_check(lower))
continue;
- dp = dsa_slave_to_port(lower);
+ dp = dsa_user_to_port(lower);
if (!dp->lag)
/* Software LAG */
continue;
- err = dsa_slave_prechangeupper(lower, info);
+ err = dsa_user_prechangeupper(lower, info);
if (notifier_to_errno(err))
break;
}
@@ -2782,7 +3120,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
- struct net_device *slave, *br;
+ struct net_device *user, *br;
struct dsa_port *dp;
ext_ack = netdev_notifier_info_to_extack(&info->info);
@@ -2790,11 +3128,11 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
if (!is_vlan_dev(dev))
return NOTIFY_DONE;
- slave = vlan_dev_real_dev(dev);
- if (!dsa_slave_dev_check(slave))
+ user = vlan_dev_real_dev(dev);
+ if (!dsa_user_dev_check(user))
return NOTIFY_DONE;
- dp = dsa_slave_to_port(slave);
+ dp = dsa_user_to_port(user);
br = dsa_port_bridge_dev_get(dp);
if (!br)
return NOTIFY_DONE;
@@ -2803,7 +3141,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
if (br_vlan_enabled(br) &&
netif_is_bridge_master(info->upper_dev) && info->linking) {
NL_SET_ERR_MSG_MOD(ext_ack,
- "Cannot enslave VLAN device into VLAN aware bridge");
+ "Cannot make VLAN device join VLAN-aware bridge");
return notifier_from_errno(-EINVAL);
}
@@ -2811,10 +3149,10 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
}
static int
-dsa_slave_check_8021q_upper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_check_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct bridge_vlan_info br_info;
struct netlink_ext_ack *extack;
@@ -2842,17 +3180,17 @@ dsa_slave_check_8021q_upper(struct net_device *dev,
}
static int
-dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_prechangeupper_sanity_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct dsa_switch *ds;
struct dsa_port *dp;
int err;
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return dsa_prevent_bridging_8021q_upper(dev, info);
- dp = dsa_slave_to_port(dev);
+ dp = dsa_user_to_port(dev);
ds = dp->ds;
if (ds->ops->port_prechangeupper) {
@@ -2862,17 +3200,17 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
}
if (is_vlan_dev(info->upper_dev))
- return dsa_slave_check_8021q_upper(dev, info);
+ return dsa_user_check_8021q_upper(dev, info);
return NOTIFY_DONE;
}
-/* To be eligible as a DSA master, a LAG must have all lower interfaces be
- * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
+/* To be eligible as a DSA conduit, a LAG must have all lower interfaces be
+ * eligible DSA conduits. Additionally, all LAG slaves must be DSA conduits of
* switches in the same switch tree.
*/
-static int dsa_lag_master_validate(struct net_device *lag_dev,
- struct netlink_ext_ack *extack)
+static int dsa_lag_conduit_validate(struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
struct net_device *lower1, *lower2;
struct list_head *iter1, *iter2;
@@ -2882,7 +3220,7 @@ static int dsa_lag_master_validate(struct net_device *lag_dev,
if (!netdev_uses_dsa(lower1) ||
!netdev_uses_dsa(lower2)) {
NL_SET_ERR_MSG_MOD(extack,
- "All LAG ports must be eligible as DSA masters");
+ "All LAG ports must be eligible as DSA conduits");
return notifier_from_errno(-EINVAL);
}
@@ -2892,7 +3230,7 @@ static int dsa_lag_master_validate(struct net_device *lag_dev,
if (!dsa_port_tree_same(lower1->dsa_ptr,
lower2->dsa_ptr)) {
NL_SET_ERR_MSG_MOD(extack,
- "LAG contains DSA masters of disjoint switch trees");
+ "LAG contains DSA conduits of disjoint switch trees");
return notifier_from_errno(-EINVAL);
}
}
@@ -2902,41 +3240,41 @@ static int dsa_lag_master_validate(struct net_device *lag_dev,
}
static int
-dsa_master_prechangeupper_sanity_check(struct net_device *master,
- struct netdev_notifier_changeupper_info *info)
+dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
- if (!netdev_uses_dsa(master))
+ if (!netdev_uses_dsa(conduit))
return NOTIFY_DONE;
if (!info->linking)
return NOTIFY_DONE;
/* Allow DSA switch uppers */
- if (dsa_slave_dev_check(info->upper_dev))
+ if (dsa_user_dev_check(info->upper_dev))
return NOTIFY_DONE;
- /* Allow bridge uppers of DSA masters, subject to further
+ /* Allow bridge uppers of DSA conduits, subject to further
* restrictions in dsa_bridge_prechangelower_sanity_check()
*/
if (netif_is_bridge_master(info->upper_dev))
return NOTIFY_DONE;
/* Allow LAG uppers, subject to further restrictions in
- * dsa_lag_master_prechangelower_sanity_check()
+ * dsa_lag_conduit_prechangelower_sanity_check()
*/
if (netif_is_lag_master(info->upper_dev))
- return dsa_lag_master_validate(info->upper_dev, extack);
+ return dsa_lag_conduit_validate(info->upper_dev, extack);
NL_SET_ERR_MSG_MOD(extack,
- "DSA master cannot join unknown upper interfaces");
+ "DSA conduit cannot join unknown upper interfaces");
return notifier_from_errno(-EBUSY);
}
static int
-dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
struct net_device *lag_dev = info->upper_dev;
@@ -2951,14 +3289,14 @@ dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
if (!netdev_uses_dsa(dev)) {
NL_SET_ERR_MSG(extack,
- "Only DSA masters can join a LAG DSA master");
+ "Only DSA conduits can join a LAG DSA conduit");
return notifier_from_errno(-EINVAL);
}
netdev_for_each_lower_dev(lag_dev, lower, iter) {
if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
NL_SET_ERR_MSG(extack,
- "Interface is DSA master for a different switch tree than this LAG");
+ "Interface is DSA conduit for a different switch tree than this LAG");
return notifier_from_errno(-EINVAL);
}
@@ -2968,13 +3306,13 @@ dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
return NOTIFY_DONE;
}
-/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
+/* Don't allow bridging of DSA conduits, since the bridge layer rx_handler
* prevents the DSA fake ethertype handler to be invoked, so we don't get the
* chance to strip off and parse the DSA switch tag protocol header (the bridge
* layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
* frames).
* The only case where that would not be an issue is when bridging can already
- * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
+ * be offloaded, such as when the DSA conduit is itself a DSA or plain switchdev
* port, and is bridged only with other ports from the same hardware device.
*/
static int
@@ -3000,7 +3338,7 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
if (!netdev_port_same_parent_id(lower, new_lower)) {
NL_SET_ERR_MSG(extack,
- "Cannot do software bridging with a DSA master");
+ "Cannot do software bridging with a DSA conduit");
return notifier_from_errno(-EINVAL);
}
}
@@ -3008,45 +3346,45 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
return NOTIFY_DONE;
}
-static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
- struct net_device *lag_dev)
+static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst,
+ struct net_device *lag_dev)
{
- struct net_device *new_master = dsa_tree_find_first_master(dst);
+ struct net_device *new_conduit = dsa_tree_find_first_conduit(dst);
struct dsa_port *dp;
int err;
dsa_tree_for_each_user_port(dp, dst) {
- if (dsa_port_to_master(dp) != lag_dev)
+ if (dsa_port_to_conduit(dp) != lag_dev)
continue;
- err = dsa_slave_change_master(dp->slave, new_master, NULL);
+ err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
if (err) {
- netdev_err(dp->slave,
- "failed to restore master to %s: %pe\n",
- new_master->name, ERR_PTR(err));
+ netdev_err(dp->user,
+ "failed to restore conduit to %s: %pe\n",
+ new_conduit->name, ERR_PTR(err));
}
}
}
-static int dsa_master_lag_join(struct net_device *master,
- struct net_device *lag_dev,
- struct netdev_lag_upper_info *uinfo,
- struct netlink_ext_ack *extack)
+static int dsa_conduit_lag_join(struct net_device *conduit,
+ struct net_device *lag_dev,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
int err;
- err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
+ err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack);
if (err)
return err;
dsa_tree_for_each_user_port(dp, dst) {
- if (dsa_port_to_master(dp) != master)
+ if (dsa_port_to_conduit(dp) != conduit)
continue;
- err = dsa_slave_change_master(dp->slave, lag_dev, extack);
+ err = dsa_user_change_conduit(dp->user, lag_dev, extack);
if (err)
goto restore;
}
@@ -3055,24 +3393,24 @@ static int dsa_master_lag_join(struct net_device *master,
restore:
dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
- if (dsa_port_to_master(dp) != lag_dev)
+ if (dsa_port_to_conduit(dp) != lag_dev)
continue;
- err = dsa_slave_change_master(dp->slave, master, NULL);
+ err = dsa_user_change_conduit(dp->user, conduit, NULL);
if (err) {
- netdev_err(dp->slave,
- "failed to restore master to %s: %pe\n",
- master->name, ERR_PTR(err));
+ netdev_err(dp->user,
+ "failed to restore conduit to %s: %pe\n",
+ conduit->name, ERR_PTR(err));
}
}
- dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+ dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
return err;
}
-static void dsa_master_lag_leave(struct net_device *master,
- struct net_device *lag_dev)
+static void dsa_conduit_lag_leave(struct net_device *conduit,
+ struct net_device *lag_dev)
{
struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
@@ -3089,10 +3427,10 @@ static void dsa_master_lag_leave(struct net_device *master,
if (new_cpu_dp) {
/* Update the CPU port of the user ports still under the LAG
- * so that dsa_port_to_master() continues to work properly
+ * so that dsa_port_to_conduit() continues to work properly
*/
dsa_tree_for_each_user_port(dp, dst)
- if (dsa_port_to_master(dp) == lag_dev)
+ if (dsa_port_to_conduit(dp) == lag_dev)
dp->cpu_dp = new_cpu_dp;
/* Update the index of the virtual CPU port to match the lowest
@@ -3101,20 +3439,20 @@ static void dsa_master_lag_leave(struct net_device *master,
lag_dev->dsa_ptr = new_cpu_dp;
wmb();
} else {
- /* If the LAG DSA master has no ports left, migrate back all
+ /* If the LAG DSA conduit has no ports left, migrate back all
* user ports to the first physical CPU port
*/
- dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
+ dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev);
}
- /* This DSA master has left its LAG in any case, so let
+ /* This DSA conduit has left its LAG in any case, so let
* the CPU port leave the hardware LAG as well
*/
- dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+ dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
}
-static int dsa_master_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int dsa_conduit_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
@@ -3126,11 +3464,11 @@ static int dsa_master_changeupper(struct net_device *dev,
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking) {
- err = dsa_master_lag_join(dev, info->upper_dev,
- info->upper_info, extack);
+ err = dsa_conduit_lag_join(dev, info->upper_dev,
+ info->upper_info, extack);
err = notifier_from_errno(err);
} else {
- dsa_master_lag_leave(dev, info->upper_dev);
+ dsa_conduit_lag_leave(dev, info->upper_dev);
err = NOTIFY_OK;
}
}
@@ -3138,8 +3476,8 @@ static int dsa_master_changeupper(struct net_device *dev,
return err;
}
-static int dsa_slave_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dsa_user_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
@@ -3148,15 +3486,15 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
struct netdev_notifier_changeupper_info *info = ptr;
int err;
- err = dsa_slave_prechangeupper_sanity_check(dev, info);
+ err = dsa_user_prechangeupper_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
- err = dsa_master_prechangeupper_sanity_check(dev, info);
+ err = dsa_conduit_prechangeupper_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
- err = dsa_lag_master_prechangelower_sanity_check(dev, info);
+ err = dsa_lag_conduit_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
@@ -3164,11 +3502,11 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (notifier_to_errno(err))
return err;
- err = dsa_slave_prechangeupper(dev, ptr);
+ err = dsa_user_prechangeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
- err = dsa_slave_lag_prechangeupper(dev, ptr);
+ err = dsa_user_lag_prechangeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
@@ -3177,15 +3515,15 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
case NETDEV_CHANGEUPPER: {
int err;
- err = dsa_slave_changeupper(dev, ptr);
+ err = dsa_user_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
- err = dsa_slave_lag_changeupper(dev, ptr);
+ err = dsa_user_lag_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
- err = dsa_master_changeupper(dev, ptr);
+ err = dsa_conduit_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
@@ -3196,13 +3534,13 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
struct dsa_port *dp;
int err = 0;
- if (dsa_slave_dev_check(dev)) {
- dp = dsa_slave_to_port(dev);
+ if (dsa_user_dev_check(dev)) {
+ dp = dsa_user_to_port(dev);
err = dsa_port_lag_change(dp, info->lower_state_info);
}
- /* Mirror LAG port events on DSA masters that are in
+ /* Mirror LAG port events on DSA conduits that are in
* a LAG towards their respective switch CPU ports
*/
if (netdev_uses_dsa(dev)) {
@@ -3215,28 +3553,28 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
}
case NETDEV_CHANGE:
case NETDEV_UP: {
- /* Track state of master port.
- * DSA driver may require the master port (and indirectly
+ /* Track state of conduit port.
+ * DSA driver may require the conduit port (and indirectly
* the tagger) to be available for some special operation.
*/
if (netdev_uses_dsa(dev)) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->ds->dst;
- /* Track when the master port is UP */
- dsa_tree_master_oper_state_change(dst, dev,
- netif_oper_up(dev));
+ /* Track when the conduit port is UP */
+ dsa_tree_conduit_oper_state_change(dst, dev,
+ netif_oper_up(dev));
- /* Track when the master port is ready and can accept
+ /* Track when the conduit port is ready and can accept
* packet.
* NETDEV_UP event is not enough to flag a port as ready.
* We also have to wait for linkwatch_do_dev to dev_activate
* and emit a NETDEV_CHANGE event.
- * We check if a master port is ready by checking if the dev
+ * We check if a conduit port is ready by checking if the dev
* have a qdisc assigned and is not noop.
*/
- dsa_tree_master_admin_state_change(dst, dev,
- !qdisc_tx_is_noop(dev));
+ dsa_tree_conduit_admin_state_change(dst, dev,
+ !qdisc_tx_is_noop(dev));
return NOTIFY_OK;
}
@@ -3254,7 +3592,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
cpu_dp = dev->dsa_ptr;
dst = cpu_dp->ds->dst;
- dsa_tree_master_admin_state_change(dst, dev, false);
+ dsa_tree_conduit_admin_state_change(dst, dev, false);
list_for_each_entry(dp, &dst->ports, list) {
if (!dsa_port_is_user(dp))
@@ -3263,10 +3601,10 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (dp->cpu_dp != cpu_dp)
continue;
- list_add(&dp->slave->close_list, &close_list);
+ list_add(&dp->user->close_list, &close_list);
}
- dev_close_many(&close_list, true);
+ netif_close_many(&close_list, true);
return NOTIFY_OK;
}
@@ -3289,7 +3627,7 @@ dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
switchdev_work->orig_dev, &info.info, NULL);
}
-static void dsa_slave_switchdev_event_work(struct work_struct *work)
+static void dsa_user_switchdev_event_work(struct work_struct *work)
{
struct dsa_switchdev_event_work *switchdev_work =
container_of(work, struct dsa_switchdev_event_work, work);
@@ -3300,7 +3638,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
struct dsa_port *dp;
int err;
- dp = dsa_slave_to_port(dev);
+ dp = dsa_user_to_port(dev);
ds = dp->ds;
switch (switchdev_work->event) {
@@ -3342,7 +3680,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
static bool dsa_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
- const struct dsa_port *dp = dsa_slave_to_port(dev);
+ const struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch_tree *dst = dp->ds->dst;
if (netif_is_bridge_master(foreign_dev))
@@ -3355,13 +3693,13 @@ static bool dsa_foreign_dev_check(const struct net_device *dev,
return true;
}
-static int dsa_slave_fdb_event(struct net_device *dev,
- struct net_device *orig_dev,
- unsigned long event, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info)
+static int dsa_user_fdb_event(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
{
struct dsa_switchdev_event_work *switchdev_work;
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
bool host_addr = fdb_info->is_local;
struct dsa_switch *ds = dp->ds;
@@ -3410,7 +3748,7 @@ static int dsa_slave_fdb_event(struct net_device *dev,
orig_dev->name, fdb_info->addr, fdb_info->vid,
host_addr ? " as host address" : "");
- INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
+ INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
switchdev_work->event = event;
switchdev_work->dev = dev;
switchdev_work->orig_dev = orig_dev;
@@ -3425,8 +3763,8 @@ static int dsa_slave_fdb_event(struct net_device *dev,
}
/* Called under rcu_read_lock() */
-static int dsa_slave_switchdev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int dsa_user_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
@@ -3434,15 +3772,15 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_attr_set);
+ dsa_user_dev_check,
+ dsa_user_port_attr_set);
return notifier_from_errno(err);
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
- dsa_slave_dev_check,
+ dsa_user_dev_check,
dsa_foreign_dev_check,
- dsa_slave_fdb_event);
+ dsa_user_fdb_event);
return notifier_from_errno(err);
default:
return NOTIFY_DONE;
@@ -3451,8 +3789,8 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
return NOTIFY_OK;
}
-static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int dsa_user_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
@@ -3460,52 +3798,52 @@ static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add_foreign(dev, ptr,
- dsa_slave_dev_check,
+ dsa_user_dev_check,
dsa_foreign_dev_check,
- dsa_slave_port_obj_add);
+ dsa_user_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del_foreign(dev, ptr,
- dsa_slave_dev_check,
+ dsa_user_dev_check,
dsa_foreign_dev_check,
- dsa_slave_port_obj_del);
+ dsa_user_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_attr_set);
+ dsa_user_dev_check,
+ dsa_user_port_attr_set);
return notifier_from_errno(err);
}
return NOTIFY_DONE;
}
-static struct notifier_block dsa_slave_nb __read_mostly = {
- .notifier_call = dsa_slave_netdevice_event,
+static struct notifier_block dsa_user_nb __read_mostly = {
+ .notifier_call = dsa_user_netdevice_event,
};
-struct notifier_block dsa_slave_switchdev_notifier = {
- .notifier_call = dsa_slave_switchdev_event,
+struct notifier_block dsa_user_switchdev_notifier = {
+ .notifier_call = dsa_user_switchdev_event,
};
-struct notifier_block dsa_slave_switchdev_blocking_notifier = {
- .notifier_call = dsa_slave_switchdev_blocking_event,
+struct notifier_block dsa_user_switchdev_blocking_notifier = {
+ .notifier_call = dsa_user_switchdev_blocking_event,
};
-int dsa_slave_register_notifier(void)
+int dsa_user_register_notifier(void)
{
struct notifier_block *nb;
int err;
- err = register_netdevice_notifier(&dsa_slave_nb);
+ err = register_netdevice_notifier(&dsa_user_nb);
if (err)
return err;
- err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ err = register_switchdev_notifier(&dsa_user_switchdev_notifier);
if (err)
goto err_switchdev_nb;
- nb = &dsa_slave_switchdev_blocking_notifier;
+ nb = &dsa_user_switchdev_blocking_notifier;
err = register_switchdev_blocking_notifier(nb);
if (err)
goto err_switchdev_blocking_nb;
@@ -3513,27 +3851,27 @@ int dsa_slave_register_notifier(void)
return 0;
err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
err_switchdev_nb:
- unregister_netdevice_notifier(&dsa_slave_nb);
+ unregister_netdevice_notifier(&dsa_user_nb);
return err;
}
-void dsa_slave_unregister_notifier(void)
+void dsa_user_unregister_notifier(void)
{
struct notifier_block *nb;
int err;
- nb = &dsa_slave_switchdev_blocking_notifier;
+ nb = &dsa_user_switchdev_blocking_notifier;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
- err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
if (err)
pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
- err = unregister_netdevice_notifier(&dsa_slave_nb);
+ err = unregister_netdevice_notifier(&dsa_user_nb);
if (err)
- pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
+ pr_err("DSA: failed to unregister user notifier (%d)\n", err);
}
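
For context on the NETDEV_CHANGE/NETDEV_UP handling in dsa_user_netdevice_event() above: a conduit is reported as administratively ready only once linkwatch has run dev_activate() and a real (non-noop) qdisc is attached, and as operationally ready while the link is up. Below is a minimal sketch of that readiness condition with an invented helper name; the real code passes the two booleans separately to dsa_tree_conduit_admin_state_change() and dsa_tree_conduit_oper_state_change().

/*
 * Illustrative only: the conduit readiness condition described in the
 * NETDEV_CHANGE/NETDEV_UP handling of dsa_user_netdevice_event(),
 * folded into a single helper. The function name is made up for the
 * example.
 */
#include <linux/netdevice.h>
#include <net/sch_generic.h>

static bool example_conduit_is_ready(const struct net_device *conduit)
{
	/* Administratively ready: linkwatch has run dev_activate(), so a
	 * real (non-noop) qdisc is attached and frames can be queued.
	 */
	bool admin_ready = !qdisc_tx_is_noop(conduit);

	/* Operationally ready: carrier is present and the device is
	 * running from the stack's point of view.
	 */
	bool oper_ready = netif_oper_up(conduit);

	return admin_ready && oper_ready;
}
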
diff --git a/net/dsa/user.h b/net/dsa/user.h
new file mode 100644
index 000000000000..016884bead3c
--- /dev/null
+++ b/net/dsa/user.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DSA_USER_H
+#define __DSA_USER_H
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/netpoll.h>
+#include <linux/types.h>
+#include <net/dsa.h>
+#include <net/gro_cells.h>
+
+struct net_device;
+struct netlink_ext_ack;
+
+extern struct notifier_block dsa_user_switchdev_notifier;
+extern struct notifier_block dsa_user_switchdev_blocking_notifier;
+
+struct dsa_user_priv {
+ /* Copy of CPU port xmit for faster access in user transmit hot path */
+ struct sk_buff * (*xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+
+ struct gro_cells gcells;
+
+ /* DSA port data, such as switch, port index, etc. */
+ struct dsa_port *dp;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
+
+ /* TC context */
+ struct list_head mall_tc_list;
+};
+
+void dsa_user_mii_bus_init(struct dsa_switch *ds);
+int dsa_user_create(struct dsa_port *dp);
+void dsa_user_destroy(struct net_device *user_dev);
+int dsa_user_suspend(struct net_device *user_dev);
+int dsa_user_resume(struct net_device *user_dev);
+int dsa_user_register_notifier(void);
+void dsa_user_unregister_notifier(void);
+int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr);
+void dsa_user_host_uc_uninstall(struct net_device *dev);
+void dsa_user_sync_ha(struct net_device *dev);
+void dsa_user_unsync_ha(struct net_device *dev);
+void dsa_user_setup_tagger(struct net_device *user);
+int dsa_user_change_mtu(struct net_device *dev, int new_mtu);
+int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
+ struct netlink_ext_ack *extack);
+int dsa_user_manage_vlan_filtering(struct net_device *dev,
+ bool vlan_filtering);
+
+static inline struct dsa_port *dsa_user_to_port(const struct net_device *dev)
+{
+ struct dsa_user_priv *p = netdev_priv(dev);
+
+ return p->dp;
+}
+
+static inline struct net_device *
+dsa_user_to_conduit(const struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+
+ return dsa_port_to_conduit(dp);
+}
+
+#endif
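
The accessors declared in user.h are how code holding a DSA user netdevice resolves its switch port and its conduit. A small usage sketch follows; the function name and message are hypothetical, and dsa_user_dev_check() (declared in <net/dsa.h>) must be called first because netdev_priv() is only a struct dsa_user_priv for DSA user ports.

/*
 * Hypothetical usage sketch for the helpers declared above. Only the
 * accessors and the dsa_user_dev_check() guard reflect the real API;
 * everything else is invented for illustration.
 */
#include <linux/netdevice.h>
#include <net/dsa.h>
#include "user.h"

static void example_describe_user_port(struct net_device *dev)
{
	struct net_device *conduit;
	struct dsa_port *dp;

	/* Bail out unless this really is a DSA user port. */
	if (!dsa_user_dev_check(dev))
		return;

	dp = dsa_user_to_port(dev);		/* switch + port index */
	conduit = dsa_user_to_conduit(dev);	/* host-facing netdev */

	netdev_info(dev, "switch %d, port %d, conduit %s\n",
		    dp->ds->index, dp->index, conduit->name);
}
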